From: Hugh Dickins

Nick has tracked scheduling-while-atomic errors to shmem's fragile kmap
avoidance: the root error appears to lie deeper, but rework that fragility.
Plus I've been indicted for war crimes at the end of shmem_swp_entry: my
apologia scorned, so now hide the evidence.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
---

 25-akpm/mm/shmem.c |   42 +++++++++++++++++++-----------------------
 1 files changed, 19 insertions(+), 23 deletions(-)

diff -puN mm/shmem.c~tmpfs-scheduling-while-atomic-fix mm/shmem.c
--- 25/mm/shmem.c~tmpfs-scheduling-while-atomic-fix	Thu Jul  8 16:05:44 2004
+++ 25-akpm/mm/shmem.c	Thu Jul  8 16:05:44 2004
@@ -105,22 +105,24 @@ static inline void shmem_dir_unmap(struc
 
 static swp_entry_t *shmem_swp_map(struct page *page)
 {
+	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
+}
+
+static inline void shmem_swp_balance_unmap(void)
+{
 	/*
-	 * We have to avoid the unconditional inc_preempt_count()
-	 * in kmap_atomic(), since shmem_swp_unmap() will also be
-	 * applied to the low memory addresses within i_direct[].
-	 * PageHighMem and high_memory tests are good for all arches
-	 * and configs: highmem_start_page and FIXADDR_START are not.
+	 * When passing a pointer to an i_direct entry, to code which
+	 * also handles indirect entries and so will shmem_swp_unmap,
+	 * we must arrange for the preempt count to remain in balance.
+	 * What kmap_atomic of a lowmem page does depends on config
+	 * and architecture, so pretend to kmap_atomic some lowmem page.
 	 */
-	return PageHighMem(page)?
-		(swp_entry_t *)kmap_atomic(page, KM_USER1):
-		(swp_entry_t *)page_address(page);
+	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
 }
 
 static inline void shmem_swp_unmap(swp_entry_t *entry)
 {
-	if (entry >= (swp_entry_t *)high_memory)
-		kunmap_atomic(entry, KM_USER1);
+	kunmap_atomic(entry, KM_USER1);
 }
 
 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
@@ -263,8 +265,10 @@ static swp_entry_t *shmem_swp_entry(stru
 	struct page **dir;
 	struct page *subdir;
 
-	if (index < SHMEM_NR_DIRECT)
+	if (index < SHMEM_NR_DIRECT) {
+		shmem_swp_balance_unmap();
 		return info->i_direct+index;
+	}
 	if (!info->i_indirect) {
 		if (page) {
 			info->i_indirect = *page;
@@ -306,17 +310,7 @@ static swp_entry_t *shmem_swp_entry(stru
 		*page = NULL;
 	}
 	shmem_dir_unmap(dir);
-
-	/*
-	 * With apologies... caller shmem_swp_alloc passes non-NULL
-	 * page (though perhaps NULL *page); and now we know that this
-	 * indirect page has been allocated, we can shortcut the final
-	 * kmap if we know it contains no swap entries, as is commonly
-	 * the case: return pointer to a 0 which doesn't need kmapping.
-	 */
-	return (page && !subdir->nr_swapped)?
-		(swp_entry_t *)&subdir->nr_swapped:
-		shmem_swp_map(subdir) + offset;
+	return shmem_swp_map(subdir) + offset;
 }
 
 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
@@ -650,8 +644,10 @@ static int shmem_unuse_inode(struct shme
 		if (size > SHMEM_NR_DIRECT)
 			size = SHMEM_NR_DIRECT;
 		offset = shmem_find_swp(entry, ptr, ptr+size);
-		if (offset >= 0)
+		if (offset >= 0) {
+			shmem_swp_balance_unmap();
 			goto found;
+		}
 		if (!info->i_indirect)
 			goto lost2;	/* we might be racing with shmem_truncate */
_
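
A minimal caller sketch, not part of the patch, showing why the dummy
kmap_atomic in shmem_swp_balance_unmap() matters: callers treat direct and
indirect entries uniformly and always finish with shmem_swp_unmap(), whose
kunmap_atomic() decrements the preempt count wherever kmap_atomic() would
have incremented it.  The helper example_read_swp() below is hypothetical
and only assumes the shmem_swp_entry()/shmem_swp_unmap() definitions shown
in the diff above; real callers also hold info->lock.

	/* Hypothetical illustration only, not in mm/shmem.c */
	static swp_entry_t example_read_swp(struct shmem_inode_info *info,
					    unsigned long index)
	{
		swp_entry_t swap = { 0 };
		swp_entry_t *ptr;

		/*
		 * For index < SHMEM_NR_DIRECT this returns a pointer into
		 * info->i_direct[] after shmem_swp_balance_unmap() has done
		 * a dummy kmap_atomic(ZERO_PAGE(0), KM_USER1); otherwise it
		 * returns a real kmap_atomic mapping of an indirect page
		 * (or NULL, since no page is offered for allocation here).
		 */
		ptr = shmem_swp_entry(info, index, NULL);
		if (ptr) {
			swap = *ptr;
			/*
			 * Either way, this kunmap_atomic(..., KM_USER1)
			 * pairs with a preceding kmap_atomic, keeping the
			 * preempt count balanced.
			 */
			shmem_swp_unmap(ptr);
		}
		return swap;
	}

The upshot is that shmem_swp_unmap() no longer has to guess whether its
argument was really kmapped (the old high_memory comparison), which was the
fragile part Nick's scheduling-while-atomic reports pointed at.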