From: Nick Piggin

Use the zero-length array, which is already quite abundant throughout the
tree, to reduce the size of the zone padding itself to zero.  sizeof(struct
zone) is 1536 bytes before, 1152 afterwards for me.

Also try to be a bit smarter about grouping commonly accessed fields
together, which surely can't be worse than before.

Signed-off-by: Andrew Morton
---

 25-akpm/include/linux/mmzone.h |   37 +++++++++++++++++++------------------
 1 files changed, 19 insertions(+), 18 deletions(-)

diff -puN include/linux/mmzone.h~mm-help-zone-padding include/linux/mmzone.h
--- 25/include/linux/mmzone.h~mm-help-zone-padding	2004-10-21 21:16:12.270688440 -0700
+++ 25-akpm/include/linux/mmzone.h	2004-10-21 21:16:12.275687680 -0700
@@ -35,7 +35,7 @@ struct pglist_data;
  */
 #if defined(CONFIG_SMP)
 struct zone_padding {
-	int x;
+	char x[0];
 } ____cacheline_maxaligned_in_smp;
 #define ZONE_PADDING(name)	struct zone_padding name;
 #else
@@ -108,10 +108,7 @@ struct per_cpu_pageset {
  */
 
 struct zone {
-	/*
-	 * Commonly accessed fields:
-	 */
-	spinlock_t		lock;
+	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
 	unsigned long		pages_min, pages_low, pages_high;
 	/*
@@ -128,8 +125,18 @@ struct zone {
 	 */
 	unsigned long		protection[MAX_NR_ZONES];
 
+	struct per_cpu_pageset	pageset[NR_CPUS];
+
+	/*
+	 * free areas of different sizes
+	 */
+	spinlock_t		lock;
+	struct free_area	free_area[MAX_ORDER];
+
+
 	ZONE_PADDING(_pad1_)
 
+	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
 	struct list_head	active_list;
 	struct list_head	inactive_list;
@@ -137,10 +144,8 @@ struct zone {
 	unsigned long		nr_scan_inactive;
 	unsigned long		nr_active;
 	unsigned long		nr_inactive;
-	int			all_unreclaimable; /* All pages pinned */
 	unsigned long		pages_scanned;	   /* since last reclaim */
-
-	ZONE_PADDING(_pad2_)
+	int			all_unreclaimable; /* All pages pinned */
 
 	/*
 	 * prev_priority holds the scanning priority for this zone.  It is
@@ -161,10 +166,9 @@ struct zone {
 	int temp_priority;
 	int prev_priority;
 
-	/*
-	 * free areas of different sizes
-	 */
-	struct free_area	free_area[MAX_ORDER];
+
+	ZONE_PADDING(_pad2_)
+	/* Rarely used or read-mostly fields */
 
 	/*
 	 * wait_table		-- the array holding the hash table
@@ -194,10 +198,6 @@ struct zone {
 	unsigned long		wait_table_size;
 	unsigned long		wait_table_bits;
 
-	ZONE_PADDING(_pad3_)
-
-	struct per_cpu_pageset	pageset[NR_CPUS];
-
 	/*
 	 * Discontig memory support fields.
 	 */
@@ -206,12 +206,13 @@ struct zone {
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long		zone_start_pfn;
 
+	unsigned long		spanned_pages;	/* total size, including holes */
+	unsigned long		present_pages;	/* amount of memory (excluding holes) */
+
 	/*
 	 * rarely used fields:
 	 */
 	char			*name;
-	unsigned long		spanned_pages;	/* total size, including holes */
-	unsigned long		present_pages;	/* amount of memory (excluding holes) */
 } ____cacheline_maxaligned_in_smp;
_
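
For readers unfamiliar with the trick, here is a minimal standalone sketch
(not kernel code) of why the zero-length array shrinks the padding.  The
SMP_CACHE_BYTES value of 64 and the pad_old/pad_new/demo_zone names are
assumptions made up for this example; the kernel's real
____cacheline_maxaligned_in_smp aligns to the largest supported L1
cacheline size instead.

	/*
	 * Sketch of the ZONE_PADDING trick.  SMP_CACHE_BYTES, pad_old,
	 * pad_new and demo_zone are hypothetical names for illustration.
	 */
	#include <stdio.h>

	#define SMP_CACHE_BYTES 64	/* assumed cacheline size */

	/* Old style: the int gives the struct a nonzero size, so each
	 * pad member occupies a whole cacheline of its own. */
	struct pad_old {
		int x;
	} __attribute__((__aligned__(SMP_CACHE_BYTES)));

	/* New style: a zero-length array (a GCC extension) makes the
	 * struct itself zero bytes; only its alignment survives, which
	 * pushes whatever follows onto a fresh cacheline for free. */
	struct pad_new {
		char x[0];
	} __attribute__((__aligned__(SMP_CACHE_BYTES)));

	/* Toy two-section layout in the spirit of struct zone: hot
	 * allocator fields, a padding boundary, then reclaim fields. */
	struct demo_zone {
		unsigned long	hot[4];		/* "page allocator" fields */
		struct pad_new	_pad1_;		/* 0 bytes of its own */
		unsigned long	cold[4];	/* "page reclaim" fields */
	};

	int main(void)
	{
		printf("sizeof(struct pad_old)   = %zu\n",
			sizeof(struct pad_old));	/* 64 */
		printf("sizeof(struct pad_new)   = %zu\n",
			sizeof(struct pad_new));	/* 0 */
		printf("sizeof(struct demo_zone) = %zu\n",
			sizeof(struct demo_zone));	/* 128 */
		return 0;
	}

cold[] still starts on its own cacheline (offset 64 here), but the pad
member itself no longer adds a full cacheline per boundary the way the
int-based struct did, which is where the sizeof(struct zone) saving in
the changelog above comes from.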