author	Mel Gorman <mel@csn.ul.ie>	2015-05-14 01:02:17 +0000
committer	Johannes Weiner <hannes@cmpxchg.org>	2015-05-14 01:02:17 +0000
commit	9d30f625d84e04f345d5c7d57aeb2e1a00dfd0ab (patch)
tree	9e0c37674070d25dbf030252287ffb84f5addbe9
parent	8398a475adb29e3878eb7a5497985c7b9f4952e2 (diff)
Add debugging aid for memory initialisation problems
A number of bug reports have been submitted related to memory initialisation that would have been easier to debug if the PFN of page addresses were available. The dmesg output is often insufficient to find that information, so debugging patches need to be sent to the reporting user.

This patch prints out information on the memmap when it is being allocated, and sizeof(struct page), when the loglevel is set high enough. In most architectures this output is produced in generic code; x86_64 and ia64 both set up node_mem_map in an architecture-specific manner and require arch-specific changes. The memmap information can be used to translate any valid page address into a PFN. page_to_pfn() cannot be used directly in bad_page() because there is no guarantee that the address pointer is valid in any way, and the translation can produce garbage.

Information on the memmap is not printed out for the SPARSEMEM memory model. This only applies to FLATMEM and DISCONTIG configurations.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
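For reference, a rough userspace sketch of the translation the changelog describes: given the "memmap at ... first pfn ..." line and the reported sizeof(struct page), a raw struct page address from a bad_page()-style report can be turned into a PFN with plain arithmetic instead of page_to_pfn(). All numeric values below are hypothetical examples, not taken from the patch, and the node start PFN is assumed to be known (e.g. 0, or read from the "Zone ranges" output).

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	int main(void)
	{
		/* Values as they would be read from the debug output; made up for illustration. */
		uintptr_t node_mem_map  = 0xa0007fffc7e00000UL; /* "first pfn 0x%p" value        */
		size_t    page_size     = 64;                   /* reported sizeof(struct page)  */
		unsigned long start_pfn = 0;                    /* node start pfn (assumed known) */

		/* Raw struct page pointer as it might appear in a bad_page() report. */
		uintptr_t page_addr = 0xa0007fffc7e04000UL;

		/* Offset into the memmap array, divided by the struct size, gives the PFN. */
		unsigned long pfn = start_pfn + (page_addr - node_mem_map) / page_size;

		printf("struct page %#lx corresponds to pfn %#lx\n",
		       (unsigned long)page_addr, pfn);
		return 0;
	}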
-rw-r--r--	arch/ia64/mm/contig.c	3
-rw-r--r--	arch/ia64/mm/discontig.c	3
-rw-r--r--	mm/page_alloc.c	5
3 files changed, 11 insertions, 0 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 52715a71aede0..191e649015552 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -266,6 +266,9 @@ paging_init (void)
 	 */
 	NODE_DATA(0)->node_mem_map = vmem_map +
 		find_min_pfn_with_active_regions();
+	printk(KERN_DEBUG
+	       "Node %d memmap at 0x%p size %lu first pfn 0x%p\n",
+	       0, vmem_map, map_size, NODE_DATA(0)->node_mem_map);
 	free_area_init_nodes(max_zone_pfns);
 
 	printk("Virtual mem_map starts at 0x%p\n", mem_map);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 8786268053693..0dbbe866798ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -717,6 +717,9 @@ void __init paging_init(void)
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
+		printk(KERN_DEBUG
+		       "Node %d memmap at 0x%p size %u first pfn 0x%p\n",
+		       node, vmem_map, 0, NODE_DATA(node)->node_mem_map);
 #endif
 		if (mem_data[node].max_pfn > max_pfn)
 			max_pfn = mem_data[node].max_pfn;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index afd5459c04206..efe21b7d407fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5443,6 +5443,8 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		map = memblock_virt_alloc_node_nopanic(size,
 						       pgdat->node_id);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
+		pr_debug("Node %d memmap at 0x%p size %lu first pfn 0x%p\n",
+			 pgdat->node_id, map, size, pgdat->node_mem_map);
 	}
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 	/*
@@ -5837,6 +5839,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
 	find_zone_movable_pfns_for_nodes();
 
+	/* Print out the page size for debugging meminit problems */
+	printk(KERN_DEBUG "sizeof(struct page) = %zd\n", sizeof(struct page));
+
 	/* Print out the zone ranges */
 	pr_info("Zone ranges:\n");
 	for (i = 0; i < MAX_NR_ZONES; i++) {