From: Andrea Arcangeli

Reject zero page vm-area request, align size properly and hide the guard
page from the callers like ioremap - this avoids a kernel crash due to one
more page being passed to change_page_attr

Signed-off-by: Andrea Arcangeli
Signed-off-by: Andrew Morton
---

 25-akpm/mm/vmalloc.c |   15 +++++++++++----
 1 files changed, 11 insertions(+), 4 deletions(-)

diff -puN mm/vmalloc.c~fix-iounmap-and-a-pageattr-memleak-x86-and-x86-64 mm/vmalloc.c
--- 25/mm/vmalloc.c~fix-iounmap-and-a-pageattr-memleak-x86-and-x86-64	Thu Feb 17 16:33:02 2005
+++ 25-akpm/mm/vmalloc.c	Thu Feb 17 16:33:02 2005
@@ -252,20 +252,22 @@ struct vm_struct *__get_vm_area(unsigned
 		align = 1ul << bit;
 	}
 	addr = ALIGN(start, align);
+	size = PAGE_ALIGN(size);
 
 	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
 		return NULL;
 
-	/*
-	 * We always allocate a guard page.
-	 */
-	size += PAGE_SIZE;
 	if (unlikely(!size)) {
 		kfree (area);
 		return NULL;
 	}
 
+	/*
+	 * We always allocate a guard page.
+	 */
+	size += PAGE_SIZE;
+
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
 		if ((unsigned long)tmp->addr < addr) {
@@ -345,6 +347,11 @@ found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
 	write_unlock(&vmlist_lock);
+
+	/*
+	 * Remove the guard page.
+	 */
+	tmp->size -= PAGE_SIZE;
 	return tmp;
 }
 
_