From: "Andi Kleen" The PTEs can point to ioremap mappings too, and these are often outside mem_map. The NUMA hash page lookup functions cannot handle out of bounds accesses properly. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton --- arch/x86_64/mm/fault.c | 11 +++++++++-- 1 files changed, 9 insertions(+), 2 deletions(-) diff -puN arch/x86_64/mm/fault.c~x86_64-when-checking-vmalloc-mappings-dont-use arch/x86_64/mm/fault.c --- 25/arch/x86_64/mm/fault.c~x86_64-when-checking-vmalloc-mappings-dont-use 2005-05-13 22:54:58.000000000 -0700 +++ 25-akpm/arch/x86_64/mm/fault.c 2005-05-13 22:54:58.000000000 -0700 @@ -234,6 +234,8 @@ static noinline void pgtable_bad(unsigne /* * Handle a fault on the vmalloc or module mapping area + * + * This assumes no large pages in there. */ static int vmalloc_fault(unsigned long address) { @@ -272,7 +274,10 @@ static int vmalloc_fault(unsigned long a if (!pte_present(*pte_ref)) return -1; pte = pte_offset_kernel(pmd, address); - if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref)) + /* Don't use pte_page here, because the mappings can point + outside mem_map, and the NUMA hash lookup cannot handle + that. */ + if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) BUG(); __flush_tlb_all(); return 0; @@ -346,7 +351,9 @@ asmlinkage void do_page_fault(struct pt_ * protection error (error_code & 1) == 0. */ if (unlikely(address >= TASK_SIZE)) { - if (!(error_code & 5)) { + if (!(error_code & 5) && + ((address >= VMALLOC_START && address < VMALLOC_END) || + (address >= MODULES_VADDR && address < MODULES_END))) { if (vmalloc_fault(address) < 0) goto bad_area_nosemaphore; return; _