author     Christoph Lameter <clameter@sgi.com>   2006-09-25 23:31:13 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 08:48:46 -0700
commit     fb0e7942bdcbbd2f90e61cb4cfa4fa892a873f8a (patch)
tree       71344e9afafbd631f4ac010bc8c48e0b16737299
parent     2f1b6248682f8b39ca3c7e549dfc216d26c4109b (diff)
download   linux-fb0e7942bdcbbd2f90e61cb4cfa4fa892a873f8a.tar.gz
[PATCH] reduce MAX_NR_ZONES: make ZONE_DMA32 optional
Make ZONE_DMA32 optional:

- Add #ifdefs around ZONE_DMA32 specific code and definitions.

- Add CONFIG_ZONE_DMA32 config option and use that for x86_64 that alone
  needs this zone.

- Remove the use of CONFIG_DMA_IS_DMA32 and CONFIG_DMA_IS_NORMAL for ia64
  and fix up the way per node ZVCs are calculated.

- Fall back to prior GFP_ZONEMASK of 0x03 if there is no DMA32 zone.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   arch/ia64/Kconfig        9
-rw-r--r--   arch/x86_64/Kconfig      4
-rw-r--r--   include/linux/gfp.h      2
-rw-r--r--   include/linux/mmzone.h  16
-rw-r--r--   include/linux/vmstat.h   4
-rw-r--r--   mm/page_alloc.c          6
6 files changed, 25 insertions, 16 deletions
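The pattern applied in include/linux/mmzone.h and include/linux/gfp.h can be exercised outside the kernel. Below is a minimal standalone C sketch (hypothetical userspace code, not the kernel definitions themselves) of how a CONFIG_ZONE_DMA32 macro makes one enumerator optional and shrinks the zone mask back to 0x03; compile it with and without -DCONFIG_ZONE_DMA32 to see both configurations.

/*
 * Standalone sketch of the technique used by this patch: a zone is
 * compiled in or out with a config macro, and the GFP zone mask
 * shrinks accordingly.  Build with -DCONFIG_ZONE_DMA32 for the x86_64
 * layout, or without it for the 0x03 fallback.
 */
#include <stdio.h>

enum zone_type {
	ZONE_DMA,
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,		/* only x86_64 selects CONFIG_ZONE_DMA32 */
#endif
	ZONE_NORMAL,
	ZONE_HIGHMEM,
	MAX_NR_ZONES		/* one smaller when DMA32 is absent */
};

#ifdef CONFIG_ZONE_DMA32
#define GFP_ZONEMASK	0x07	/* three low bits select the zone */
#else
#define GFP_ZONEMASK	0x03	/* prior two-bit mask when no DMA32 zone */
#endif

int main(void)
{
	printf("MAX_NR_ZONES = %d, GFP_ZONEMASK = 0x%02x\n",
	       MAX_NR_ZONES, (unsigned)GFP_ZONEMASK);
	return 0;
}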
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db274da7dba1e4..f521f2f60a7802 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -66,15 +66,6 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
-config DMA_IS_DMA32
- bool
- default y
-
-config DMA_IS_NORMAL
- bool
- depends on IA64_SGI_SN2
- default y
-
config AUDIT_ARCH
bool
default y
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6cd4878625f1af..581ce9af0ec8f5 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -24,6 +24,10 @@ config X86
bool
default y
+config ZONE_DMA32
+ bool
+ default y
+
config LOCKDEP_SUPPORT
bool
default y
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cc9e608444845c..14610b56c132e8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -13,7 +13,7 @@ struct vm_area_struct;
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
#define __GFP_DMA ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
-#ifdef CONFIG_DMA_IS_DMA32
+#ifndef CONFIG_ZONE_DMA32
#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */
#elif BITS_PER_LONG < 64
#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 03a5a6eb0ffa6f..adae3c9159387f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -109,12 +109,14 @@ enum zone_type {
* <16M.
*/
ZONE_DMA,
+#ifdef CONFIG_ZONE_DMA32
/*
* x86_64 needs two ZONE_DMAs because it supports devices that are
* only able to do DMA to the lower 16M but also 32 bit devices that
* can only do DMA areas below 4G.
*/
ZONE_DMA32,
+#endif
/*
* Normal addressable memory is in ZONE_NORMAL. DMA operations can be
* performed on pages in ZONE_NORMAL if the DMA devices support
@@ -161,9 +163,13 @@ enum zone_type {
*
* NOTE! Make sure this matches the zones in <linux/gfp.h>
*/
-#define GFP_ZONEMASK 0x07
-/* #define GFP_ZONETYPES (GFP_ZONEMASK + 1) */ /* Non-loner */
-#define GFP_ZONETYPES ((GFP_ZONEMASK + 1) / 2 + 1) /* Loner */
+#define GFP_ZONETYPES ((GFP_ZONEMASK + 1) / 2 + 1) /* Loner */
+
+#ifdef CONFIG_ZONE_DMA32
+#define GFP_ZONEMASK 0x07
+#else
+#define GFP_ZONEMASK 0x03
+#endif
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -429,7 +435,11 @@ static inline int is_normal(struct zone *zone)
static inline int is_dma32(struct zone *zone)
{
+#ifdef CONFIG_ZONE_DMA32
return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+#else
+ return 0;
+#endif
}
static inline int is_dma(struct zone *zone)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d9b1b60798aca..9c6e62c56ec25a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -124,12 +124,10 @@ static inline unsigned long node_page_state(int node,
struct zone *zones = NODE_DATA(node)->node_zones;
return
-#ifndef CONFIG_DMA_IS_NORMAL
-#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
+#ifdef CONFIG_ZONE_DMA32
zone_page_state(&zones[ZONE_DMA32], item) +
#endif
zone_page_state(&zones[ZONE_NORMAL], item) +
-#endif
#ifdef CONFIG_HIGHMEM
zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
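The ZVC fix-up mentioned in the changelog collapses the removed CONFIG_DMA_IS_DMA32/CONFIG_DMA_IS_NORMAL special cases into a single conditional term in the per-node sum. A standalone sketch (hypothetical counters, not the kernel's zone_page_state() API) of the resulting summation:

#include <stdio.h>

/* hypothetical per-zone free-page counters for one node */
static unsigned long zone_pages[] = { 4 /* DMA */, 16 /* DMA32 */, 64 /* NORMAL */ };

static unsigned long node_page_state(void)
{
	return zone_pages[0] +		/* ZONE_DMA */
#ifdef CONFIG_ZONE_DMA32
	       zone_pages[1] +		/* ZONE_DMA32, counted only when the zone exists */
#endif
	       zone_pages[2];		/* ZONE_NORMAL */
}

int main(void)
{
	printf("node total: %lu pages\n", node_page_state());
	return 0;
}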
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2410a3cb1c5327..5b5cbb5e181604 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -70,7 +70,9 @@ static void __free_pages_ok(struct page *page, unsigned int order);
*/
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
256,
+#ifdef CONFIG_ZONE_DMA32
256,
+#endif
32
};
@@ -85,7 +87,9 @@ EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = {
"DMA",
+#ifdef CONFIG_ZONE_DMA32
"DMA32",
+#endif
"Normal",
"HighMem"
};
@@ -1373,8 +1377,10 @@ static inline int highest_zone(int zone_bits)
int res = ZONE_NORMAL;
if (zone_bits & (__force int)__GFP_HIGHMEM)
res = ZONE_HIGHMEM;
+#ifdef CONFIG_ZONE_DMA32
if (zone_bits & (__force int)__GFP_DMA32)
res = ZONE_DMA32;
+#endif
if (zone_bits & (__force int)__GFP_DMA)
res = ZONE_DMA;
return res;