From: Andrew Morton
Cc: Wolfgang Wander
Signed-off-by: Andrew Morton
---

 arch/arm/mm/mmap.c              |    6 +++---
 arch/i386/mm/hugetlbpage.c      |   15 ++++++++-------
 arch/ia64/kernel/sys_ia64.c     |    6 +++---
 arch/ppc64/mm/hugetlbpage.c     |   16 ++++++++--------
 arch/sh/kernel/sys_sh.c         |    4 ++--
 arch/sparc64/kernel/sys_sparc.c |    4 ++--
 arch/x86_64/kernel/sys_x86_64.c |    4 ++--
 fs/binfmt_aout.c                |    2 +-
 fs/binfmt_elf.c                 |    2 +-
 fs/hugetlbfs/inode.c            |    4 +---
 mm/mmap.c                       |   18 ++++++++++--------
 11 files changed, 41 insertions(+), 40 deletions(-)

diff -puN arch/arm/mm/mmap.c~avoiding-mmap-fragmentation-tidy arch/arm/mm/mmap.c
--- 25/arch/arm/mm/mmap.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/arm/mm/mmap.c	2005-05-11 18:35:30.000000000 -0700
@@ -73,9 +73,9 @@ arch_get_unmapped_area(struct file *filp
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if( len > mm->cached_hole_size )
+	if (len > mm->cached_hole_size) {
 		start_addr = addr = mm->free_area_cache;
-	else {
+	} else {
 		start_addr = TASK_UNMAPPED_BASE;
 		mm->cached_hole_size = 0;
 	}
@@ -107,7 +107,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 		if (do_align)
diff -puN arch/i386/mm/hugetlbpage.c~avoiding-mmap-fragmentation-tidy arch/i386/mm/hugetlbpage.c
--- 25/arch/i386/mm/hugetlbpage.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/i386/mm/hugetlbpage.c	2005-05-11 18:35:30.000000000 -0700
@@ -140,9 +140,9 @@ static unsigned long hugetlb_get_unmappe
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
 
-	if( len > mm->cached_hole_size )
+	if (len > mm->cached_hole_size) {
 		start_addr = mm->free_area_cache;
-	else {
+	} else {
 		start_addr = TASK_UNMAPPED_BASE;
 		mm->cached_hole_size = 0;
 	}
@@ -168,7 +168,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
 	}
@@ -188,7 +188,7 @@ static unsigned long hugetlb_get_unmappe
 	if (mm->free_area_cache > base)
 		mm->free_area_cache = base;
 
-	if( len <= largest_hole ) {
+	if (len <= largest_hole) {
 		largest_hole = 0;
 		mm->free_area_cache = base;
 	}
@@ -212,19 +212,20 @@ try_again:
 		 * vma->vm_start, use it:
 		 */
 		if (addr + len <= vma->vm_start &&
-			(!prev_vma || (addr >= prev_vma->vm_end))) {
+		    (!prev_vma || (addr >= prev_vma->vm_end))) {
 			/* remember the address as a hint for next time */
 			mm->cached_hole_size = largest_hole;
 			return (mm->free_area_cache = addr);
-		} else
+		} else {
 			/* pull free_area_cache down to the first hole */
 			if (mm->free_area_cache == vma->vm_end) {
 				mm->free_area_cache = vma->vm_start;
 				mm->cached_hole_size = largest_hole;
 			}
+		}
 
 		/* remember the largest hole we saw so far */
-		if( addr + largest_hole < vma->vm_start )
+		if (addr + largest_hole < vma->vm_start)
 			largest_hole = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
diff -puN arch/ia64/kernel/sys_ia64.c~avoiding-mmap-fragmentation-tidy arch/ia64/kernel/sys_ia64.c
--- 25/arch/ia64/kernel/sys_ia64.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/sys_ia64.c	2005-05-11 18:35:30.000000000 -0700
@@ -39,9 +39,9 @@ arch_get_unmapped_area (struct file *fil
 		addr = 0;
 #endif
 	if (!addr) {
-		if( len > mm->cached_hole_size )
+		if (len > mm->cached_hole_size) {
 			addr = mm->free_area_cache;
-		else {
+		} else {
 			addr = TASK_UNMAPPED_BASE;
 			mm->cached_hole_size = 0;
 		}
@@ -75,7 +75,7 @@ arch_get_unmapped_area (struct file *fil
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 		addr = (vma->vm_end + align_mask) & ~align_mask;
 	}
diff -puN arch/ppc64/mm/hugetlbpage.c~avoiding-mmap-fragmentation-tidy arch/ppc64/mm/hugetlbpage.c
--- 25/arch/ppc64/mm/hugetlbpage.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/ppc64/mm/hugetlbpage.c	2005-05-11 18:35:30.000000000 -0700
@@ -292,9 +292,9 @@ unsigned long arch_get_unmapped_area(str
 		    && !is_hugepage_only_range(mm, addr,len))
 			return addr;
 	}
-	if( len > mm->cached_hole_size )
+	if (len > mm->cached_hole_size) {
 		start_addr = addr = mm->free_area_cache;
-	else {
+	} else {
 		start_addr = addr = TASK_UNMAPPED_BASE;
 		mm->cached_hole_size = 0;
 	}
@@ -321,7 +321,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 		vma = vma->vm_next;
@@ -373,9 +373,9 @@ arch_get_unmapped_area_topdown(struct fi
 			return addr;
 	}
 
-	if( len <= largest_hole ) {
+	if (len <= largest_hole) {
 		largest_hole = 0;
-	        mm->free_area_cache = base;
+		mm->free_area_cache = base;
 	}
 try_again:
 	/* make sure it can fit in the remaining address space */
@@ -409,16 +409,16 @@ hugepage_recheck:
 			/* remember the address as a hint for next time */
 			mm->cached_hole_size = largest_hole;
 			return (mm->free_area_cache = addr);
-		}
-		else
+		} else {
 			/* pull free_area_cache down to the first hole */
 			if (mm->free_area_cache == vma->vm_end) {
 				mm->free_area_cache = vma->vm_start;
 				mm->cached_hole_size = largest_hole;
 			}
+		}
 
 		/* remember the largest hole we saw so far */
-		if( addr + largest_hole < vma->vm_start )
+		if (addr + largest_hole < vma->vm_start)
 			largest_hole = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
diff -puN arch/sh/kernel/sys_sh.c~avoiding-mmap-fragmentation-tidy arch/sh/kernel/sys_sh.c
--- 25/arch/sh/kernel/sys_sh.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/sh/kernel/sys_sh.c	2005-05-11 18:35:30.000000000 -0700
@@ -79,7 +79,7 @@ unsigned long arch_get_unmapped_area(str
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if( len <= mm->cached_hole_size ) {
+	if (len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
 		mm->free_area_cache = TASK_UNMAPPED_BASE;
 	}
@@ -111,7 +111,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		addr = vma->vm_end;
diff -puN arch/sparc64/kernel/sys_sparc.c~avoiding-mmap-fragmentation-tidy arch/sparc64/kernel/sys_sparc.c
--- 25/arch/sparc64/kernel/sys_sparc.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/sparc64/kernel/sys_sparc.c	2005-05-11 18:35:30.000000000 -0700
@@ -84,7 +84,7 @@ unsigned long arch_get_unmapped_area(str
 			return addr;
 	}
 
-	if( len <= mm->cached_hole_size ) {
+	if (len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
 		mm->free_area_cache = TASK_UNMAPPED_BASE;
 	}
@@ -119,7 +119,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		addr = vma->vm_end;
diff -puN arch/x86_64/kernel/sys_x86_64.c~avoiding-mmap-fragmentation-tidy arch/x86_64/kernel/sys_x86_64.c
--- 25/arch/x86_64/kernel/sys_x86_64.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/sys_x86_64.c	2005-05-11 18:35:30.000000000 -0700
@@ -111,7 +111,7 @@ arch_get_unmapped_area(struct file *filp
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if( len <= mm->cached_hole_size ) {
+	if (len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
 		mm->free_area_cache = begin;
 	}
@@ -142,7 +142,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		addr = vma->vm_end;
diff -puN fs/binfmt_elf.c~avoiding-mmap-fragmentation-tidy fs/binfmt_elf.c
--- 25/fs/binfmt_elf.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/fs/binfmt_elf.c	2005-05-11 18:35:30.000000000 -0700
@@ -775,7 +775,7 @@ static int load_elf_binary(struct linux_
 	   change some of these later */
 	set_mm_counter(current->mm, rss, 0);
 	current->mm->free_area_cache = current->mm->mmap_base;
-	current->mm->cached_hole_size = current->mm->cached_hole_size;
+	current->mm->cached_hole_size = 0;
 	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 				 executable_stack);
 	if (retval < 0) {
diff -puN fs/hugetlbfs/inode.c~avoiding-mmap-fragmentation-tidy fs/hugetlbfs/inode.c
--- 25/fs/hugetlbfs/inode.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/fs/hugetlbfs/inode.c	2005-05-11 18:35:30.000000000 -0700
@@ -122,11 +122,9 @@ hugetlb_get_unmapped_area(struct file *f
 
 	start_addr = mm->free_area_cache;
 
-	if(len <= mm->cached_hole_size )
+	if (len <= mm->cached_hole_size)
 		start_addr = TASK_UNMAPPED_BASE;
-
-
 full_search:
 	addr = ALIGN(start_addr, HPAGE_SIZE);
diff -puN mm/mmap.c~avoiding-mmap-fragmentation-tidy mm/mmap.c
--- 25/mm/mmap.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:35:30.000000000 -0700
+++ 25-akpm/mm/mmap.c	2005-05-11 18:35:30.000000000 -0700
@@ -1175,9 +1175,9 @@ arch_get_unmapped_area(struct file *filp
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if( len > mm->cached_hole_size )
+	if (len > mm->cached_hole_size) {
 		start_addr = addr = mm->free_area_cache;
-	else {
+	} else {
 		start_addr = addr = TASK_UNMAPPED_BASE;
 		mm->cached_hole_size = 0;
 	}
@@ -1191,7 +1191,8 @@ full_search:
 			 * some holes.
 			 */
 			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
+				addr = TASK_UNMAPPED_BASE;
+				start_addr = addr;
 				mm->cached_hole_size = 0;
 				goto full_search;
 			}
@@ -1204,7 +1205,7 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 	}
@@ -1219,7 +1220,8 @@ void arch_unmap_area(struct vm_area_stru
 	if (area->vm_start >= TASK_UNMAPPED_BASE &&
 	    area->vm_start < area->vm_mm->free_area_cache) {
 		unsigned area_size = area->vm_end-area->vm_start;
-		if( area->vm_mm->cached_hole_size < area_size )
+
+		if (area->vm_mm->cached_hole_size < area_size)
 			area->vm_mm->cached_hole_size = area_size;
 		else
 			area->vm_mm->cached_hole_size = ~0UL;
@@ -1254,9 +1256,9 @@ arch_get_unmapped_area_topdown(struct fi
 	}
 
 	/* check if free_area_cache is useful for us */
-	if( len <= mm->cached_hole_size ) {
+	if (len <= mm->cached_hole_size) {
 		mm->cached_hole_size = 0;
-	        mm->free_area_cache = mm->mmap_base;
+		mm->free_area_cache = mm->mmap_base;
 	}
 
 	/* either no address requested or can't fit in requested address hole */
@@ -1284,7 +1286,7 @@ arch_get_unmapped_area_topdown(struct fi
 			return (mm->free_area_cache = addr);
 
 		/* remember the largest hole we saw so far */
-		if( addr + mm->cached_hole_size < vma->vm_start )
+		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
diff -puN fs/binfmt_aout.c~avoiding-mmap-fragmentation-tidy fs/binfmt_aout.c
--- 25/fs/binfmt_aout.c~avoiding-mmap-fragmentation-tidy	2005-05-11 18:47:53.000000000 -0700
+++ 25-akpm/fs/binfmt_aout.c	2005-05-11 18:47:58.000000000 -0700
@@ -316,7 +316,7 @@ static int load_aout_binary(struct linux
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
 	current->mm->free_area_cache = current->mm->mmap_base;
-	current->mm->cached_hole_size = current->mm->cached_hole_size;
+	current->mm->cached_hole_size = 0;
 	set_mm_counter(current->mm, rss, 0);
 	current->mm->mmap = NULL;
_