[PATCH 7/7] Print out debugging information during initialisation

From: Mel Gorman
Date: Tue Apr 18 2006 - 09:02:53 EST



The zone and hole sizing code is new, and unexpected problems showed up
on machines that were not covered by the pre-release tests. This patch
prints out useful information when those unexpected situations occur.

This patch is not expected to become a permanent part of the set.

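To make the new messages easier to interpret, here is a minimal userspace
sketch (an illustration only, not the kernel code; the struct, array size
and pfn values are assumptions) of the four cases that the "Existing",
"Merging forward", "Merging backwards" and "New" printks below distinguish
when add_active_range() records a region:

#include <stdio.h>

struct region { int nid; unsigned long start_pfn, end_pfn; };

#define MAX_REGIONS 8
static struct region map[MAX_REGIONS + 1];	/* end_pfn == 0 terminates the map */

static void add_active_range(int nid, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	int i;

	printf("add_active_range(%d, %lu, %lu): ", nid, start_pfn, end_pfn);

	for (i = 0; map[i].end_pfn; i++) {
		if (map[i].nid != nid)
			continue;

		/* An existing region already covers the new one */
		if (start_pfn >= map[i].start_pfn && end_pfn <= map[i].end_pfn) {
			printf("Existing\n");
			return;
		}

		/* New range overlaps the end of this region: extend it forward */
		if (start_pfn <= map[i].end_pfn && end_pfn > map[i].end_pfn) {
			printf("Merging forward\n");
			map[i].end_pfn = end_pfn;
			return;
		}

		/* New range overlaps the start of this region: extend it backward */
		if (start_pfn < map[i].end_pfn && end_pfn >= map[i].start_pfn) {
			printf("Merging backwards\n");
			map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* No merge possible: append a new entry if there is room */
	if (i >= MAX_REGIONS) {
		printf("Too many regions, dropped\n");
		return;
	}

	printf("New\n");
	map[i].nid = nid;
	map[i].start_pfn = start_pfn;
	map[i].end_pfn = end_pfn;
}

int main(void)
{
	add_active_range(0, 0, 1024);		/* New */
	add_active_range(0, 1024, 2048);	/* Merging forward */
	add_active_range(0, 256, 512);		/* Existing */
	return 0;
}
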

mem_init.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++----
1 files changed, 47 insertions(+), 4 deletions(-)

Signed-off-by: Mel Gorman <mel@xxxxxxxxx>
diff -rup -X /usr/src/patchset-0.5/bin//dontdiff linux-2.6.17-rc1-106-breakout_mem_init/mm/mem_init.c linux-2.6.17-rc1-107-debug/mm/mem_init.c
--- linux-2.6.17-rc1-106-breakout_mem_init/mm/mem_init.c 2006-04-18 10:22:00.000000000 +0100
+++ linux-2.6.17-rc1-107-debug/mm/mem_init.c 2006-04-18 10:23:46.000000000 +0100
@@ -593,6 +593,7 @@ static __meminit void init_currently_emp
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
/* Note: nid == MAX_NUMNODES returns first region */
static int __init first_active_region_index_in_nid(int nid)
{
@@ -645,13 +646,24 @@ void __init free_bootmem_with_active_reg
for_each_active_range_index_in_nid(i, nid) {
unsigned long size_pages = 0;
unsigned long end_pfn = early_node_map[i].end_pfn;
- if (early_node_map[i].start_pfn >= max_low_pfn)
+ if (early_node_map[i].start_pfn >= max_low_pfn) {
+ printk("start_pfn %lu >= %lu\n", early_node_map[i].start_pfn,
+ max_low_pfn);
continue;
+ }

- if (end_pfn > max_low_pfn)
+ if (end_pfn > max_low_pfn) {
+ printk("end_pfn %lu going back to %lu\n", early_node_map[i].end_pfn,
+ max_low_pfn);
end_pfn = max_low_pfn;
+ }

size_pages = end_pfn - early_node_map[i].start_pfn;
+ printk("free_bootmem_node(%d, %lu, %lu) :::: pfn ranges (%d, %lu, %lu)\n",
+ early_node_map[i].nid,
+ PFN_PHYS(early_node_map[i].start_pfn),
+ PFN_PHYS(size_pages),
+ early_node_map[i].nid, early_node_map[i].start_pfn, end_pfn);
free_bootmem_node(NODE_DATA(early_node_map[i].nid),
PFN_PHYS(early_node_map[i].start_pfn),
PFN_PHYS(size_pages));
@@ -661,10 +673,15 @@ void __init free_bootmem_with_active_reg
void __init sparse_memory_present_with_active_regions(int nid)
{
unsigned int i;
- for_each_active_range_index_in_nid(i, nid)
+ for_each_active_range_index_in_nid(i, nid) {
+ printk("memory_present(%d, %lu, %lu)\n",
+ early_node_map[i].nid,
+ early_node_map[i].start_pfn,
+ early_node_map[i].end_pfn);
memory_present(early_node_map[i].nid,
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
+ }
}

void __init get_pfn_range_for_nid(unsigned int nid,
@@ -722,6 +739,8 @@ unsigned long __init __absent_pages_in_r
unsigned long prev_end_pfn = 0, hole_pages = 0;
unsigned long start_pfn;

+ printk("__absent_pages_in_range(%d, %lu, %lu) = ", nid,
+ range_start_pfn, range_end_pfn);
/* Find the end_pfn of the first active range of pfns in the node */
i = first_active_region_index_in_nid(nid);
prev_end_pfn = early_node_map[i].start_pfn;
@@ -749,6 +768,8 @@ unsigned long __init __absent_pages_in_r
prev_end_pfn = early_node_map[i].end_pfn;
}

+ printk("%lu\n", hole_pages);
+
return hole_pages;
}

@@ -911,6 +932,9 @@ void __init add_active_range(unsigned in
{
unsigned int i;

+ printk("add_active_range(%d, %lu, %lu): ",
+ nid, start_pfn, end_pfn);
+
/* Merge with existing active regions if possible */
for (i = 0; early_node_map[i].end_pfn; i++) {
if (early_node_map[i].nid != nid)
@@ -918,12 +942,15 @@ void __init add_active_range(unsigned in

/* Skip if an existing region covers this new one */
if (start_pfn >= early_node_map[i].start_pfn &&
- end_pfn <= early_node_map[i].end_pfn)
+ end_pfn <= early_node_map[i].end_pfn) {
+ printk("Existing\n");
return;
+ }

/* Merge forward if suitable */
if (start_pfn <= early_node_map[i].end_pfn &&
end_pfn > early_node_map[i].end_pfn) {
+ printk("Merging forward\n");
early_node_map[i].end_pfn = end_pfn;
return;
}
@@ -931,6 +958,7 @@ void __init add_active_range(unsigned in
/* Merge backward if suitable */
if (start_pfn < early_node_map[i].end_pfn &&
end_pfn >= early_node_map[i].start_pfn) {
+ printk("Merging backwards\n");
early_node_map[i].start_pfn = start_pfn;
return;
}
@@ -942,6 +970,7 @@ void __init add_active_range(unsigned in
return;
}

+ printk("New\n");
early_node_map[i].nid = nid;
early_node_map[i].start_pfn = start_pfn;
early_node_map[i].end_pfn = end_pfn;
@@ -971,6 +1000,14 @@ static void __init sort_node_map(void)

sort(early_node_map, num, sizeof(struct node_active_region),
cmp_node_active_region, NULL);
+
+ printk("Dumping sorted node map\n");
+ for (num = 0; early_node_map[num].end_pfn; num++) {
+ printk("entry %lu: %d %lu -> %lu\n", num,
+ early_node_map[num].nid,
+ early_node_map[num].start_pfn,
+ early_node_map[num].end_pfn);
+ }
}

/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
@@ -1012,6 +1049,10 @@ void __init free_area_init_nodes(unsigne
unsigned long nid;
int zone_index;

+ printk("free_area_init_nodes(%lu, %lu, %lu, %lu)\n",
+ arch_max_dma_pfn, arch_max_dma32_pfn,
+ arch_max_low_pfn, arch_max_high_pfn);
+
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
sizeof(arch_zone_lowest_possible_pfn));
@@ -1028,6 +1069,8 @@ void __init free_area_init_nodes(unsigne
arch_zone_highest_possible_pfn[zone_index-1];
}

+ printk("free_area_init_nodes(): find_min_pfn = %lu\n", find_min_pfn_with_active_regions());
+
/* Regions in the early_node_map can be in any order */
sort_node_map();
