author     keith mannthey <kmannth@us.ibm.com>	2006-09-25 23:31:03 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 08:48:45 -0700
commit     91023300057e96de7f46e95166a3e02394ae72f9
tree       b28306089d7f5631bb023c7657808380359df316
parent     b221385bc41d6789edde3d2fa0cb20d5045730eb
[PATCH] convert i386 NUMA KVA space to bootmem
Address a long-standing issue with booting an initrd on an i386 NUMA system.
Currently (and historically) the NUMA KVA area is mapped into low memory by
finding the end of low memory and moving that mark down, thus creating space
for the KVA.  The issue with this is that GRUB loads initrds into that same
region, so when the kernel checks the initrd it finds it outside max_low_pfn
and disables it (it thinks the initrd is not mapped into usable memory), so
initrd-enabled kernels cannot boot on i386 NUMA.

My solution converts the NUMA KVA area to use the bootmem allocator to
reserve its space (instead of moving the end of low memory).  Using bootmem
allows the KVA area to be placed at more diverse addresses (not just the end
of low memory) and enables the KVA area to be mapped below the initrd when
one is present.

I have tested this patch on NUMA-Q (no initrd) and Summit (initrd) i386 NUMA
based systems.

[akpm@osdl.org: cleanups]
Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 arch/i386/kernel/setup.c  |  3
 arch/i386/mm/discontig.c  | 29
 include/asm-i386/mmzone.h |  6
 3 files changed, 29 insertions(+), 9 deletions(-)
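The placement arithmetic described in the commit message (slide the KVA area to the top of low memory, move it below the initrd if one is loaded, then round down to a page-table boundary) can be seen in isolation below. This is a minimal userspace C sketch, not kernel code: the values of max_low_pfn, kva_pages, and the initrd load address are made-up inputs chosen only to illustrate the calculation, and PFN_DOWN/PTRS_PER_PTE are redefined locally with their usual i386 meanings (4 KiB pages, 1024 PTEs per page table).

/* Userspace sketch of the KVA placement math from setup_memory();
 * all numeric inputs are illustrative, not taken from real hardware. */
#include <stdio.h>

#define PAGE_SHIFT    12                     /* 4 KiB pages on i386 */
#define PTRS_PER_PTE  1024                   /* PTE entries per page table */
#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)    /* phys addr -> page frame number */

int main(void)
{
	unsigned long max_low_pfn  = 0x38000;       /* ~896 MB of lowmem (example) */
	unsigned long kva_pages    = 0x2000;        /* size of the remap area in pages (example) */
	unsigned long initrd_start = 0x37c00000UL;  /* physical initrd load address (example) */

	/* Default: place the KVA area at the very top of low memory. */
	unsigned long kva_start_pfn = max_low_pfn - kva_pages;

	/* If an initrd is present, slide the KVA area below it so the
	 * initrd itself stays inside usable, mapped low memory. */
	if (initrd_start)
		kva_start_pfn = PFN_DOWN(initrd_start) - kva_pages;

	/* Round down to a page-table boundary, as the patch does. */
	kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE - 1);

	printf("kva_start_pfn = %#lx (phys %#lx)\n",
	       kva_start_pfn, kva_start_pfn << PAGE_SHIFT);
	return 0;
}

The region computed this way is later handed to reserve_bootmem() (see numa_kva_reserve() in the diff below), so the bootmem allocator keeps it out of general use without shrinking max_low_pfn.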
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index f1682206d304d3..27d4dc0d3ef1ed 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -53,6 +53,7 @@
#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
+#include <asm/mmzone.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/sections.h>
@@ -1258,7 +1259,7 @@ void __init setup_bootmem_allocator(void)
*/
find_smp_config();
#endif
-
+ numa_kva_reserve();
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 7c392dc553b896..2e36eff8aff9fa 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -117,7 +117,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
void *node_remap_end_vaddr[MAX_NUMNODES];
void *node_remap_alloc_vaddr[MAX_NUMNODES];
-
+static unsigned long kva_start_pfn;
+static unsigned long kva_pages;
/*
* FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat
@@ -286,7 +287,6 @@ unsigned long __init setup_memory(void)
{
int nid;
unsigned long system_start_pfn, system_max_low_pfn;
- unsigned long reserve_pages;
/*
* When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -298,14 +298,23 @@ unsigned long __init setup_memory(void)
find_max_pfn();
get_memcfg_numa();
- reserve_pages = calculate_numa_remap_pages();
+ kva_pages = calculate_numa_remap_pages();
/* partially used pages are not usable - thus round upwards */
system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
- system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages;
- printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n",
- reserve_pages, max_low_pfn + reserve_pages);
+ kva_start_pfn = find_max_low_pfn() - kva_pages;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Numa kva area is below the initrd */
+ if (LOADER_TYPE && INITRD_START)
+ kva_start_pfn = PFN_DOWN(INITRD_START) - kva_pages;
+#endif
+ kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1);
+
+ system_max_low_pfn = max_low_pfn = find_max_low_pfn();
+ printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
+ kva_start_pfn, max_low_pfn);
printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
highstart_pfn = highend_pfn = max_pfn;
@@ -323,7 +332,7 @@ unsigned long __init setup_memory(void)
(ulong) pfn_to_kaddr(max_low_pfn));
for_each_online_node(nid) {
node_remap_start_vaddr[nid] = pfn_to_kaddr(
- highstart_pfn + node_remap_offset[nid]);
+ kva_start_pfn + node_remap_offset[nid]);
/* Init the node remap allocator */
node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
(node_remap_size[nid] * PAGE_SIZE);
@@ -338,7 +347,6 @@ unsigned long __init setup_memory(void)
}
printk("High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
- vmalloc_earlyreserve = reserve_pages * PAGE_SIZE;
for_each_online_node(nid)
find_max_pfn_node(nid);
@@ -348,6 +356,11 @@ unsigned long __init setup_memory(void)
return max_low_pfn;
}
+void __init numa_kva_reserve(void)
+{
+ reserve_bootmem(PFN_PHYS(kva_start_pfn),PFN_PHYS(kva_pages));
+}
+
void __init zone_sizes_init(void)
{
int nid;
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 22cb07cc8f32cc..61b07332200683 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -38,10 +38,16 @@ static inline void get_memcfg_numa(void)
}
extern int early_pfn_to_nid(unsigned long pfn);
+extern void numa_kva_reserve(void);
#else /* !CONFIG_NUMA */
+
#define get_memcfg_numa get_memcfg_numa_flat
#define get_zholes_size(n) (0)
+
+static inline void numa_kva_reserve(void)
+{
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DISCONTIGMEM