[PATCH v7 4/5] arm: arm64: introduce pfn_valid_region()
From: Jia He
Date: Thu Apr 05 2018 - 04:05:57 EST
Commit b92df1de5d28 ("mm: page_alloc: skip over regions of invalid pfns
where possible") optimized the loop in memmap_init_zone(), but there is
still room for improvement. E.g. in early_pfn_valid() we can record the
index of the last matched memblock region. If the current pfn falls in
the same region as the previous one, the binary search can be skipped,
because memblock_is_nomap() returns the same result for the whole region.
Signed-off-by: Jia He <jia.he@xxxxxxxxxxxxxxxx>
---
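The caching idea can be illustrated with a small stand-alone model. The
region table and helper names below are simplified stand-ins invented
for illustration (they are not the memblock API); only the "remember the
last matched region" logic mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

struct region {
	unsigned long start_pfn, end_pfn;
	bool nomap;
};

static struct region regions[] = {
	{ 0x00000, 0x40000, false },
	{ 0x40000, 0x48000, true  },	/* a NOMAP hole */
	{ 0x48000, 0x80000, false },
};

static int last_idx = -1;	/* plays the role of early_region_idx */

/* Linear stand-in for memblock_search_pfn_regions(); the real helper
 * does a binary search over memblock.memory. */
static int search_regions(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (pfn >= regions[i].start_pfn && pfn < regions[i].end_pfn)
			return i;
	return -1;
}

static bool pfn_valid_cached(unsigned long pfn)
{
	/* Fast path: pfn falls in the region found by the last lookup. */
	if (last_idx != -1 &&
	    pfn >= regions[last_idx].start_pfn &&
	    pfn < regions[last_idx].end_pfn)
		return !regions[last_idx].nomap;

	/* Slow path: search again and remember the result. */
	last_idx = search_regions(pfn);
	if (last_idx == -1)
		return false;

	return !regions[last_idx].nomap;
}

int main(void)
{
	unsigned long pfn;

	/* Consecutive pfns mostly hit the cached region. */
	for (pfn = 0x3fffe; pfn < 0x40004; pfn++)
		printf("pfn %#lx valid=%d\n", pfn, pfn_valid_cached(pfn));
	return 0;
}

The cache pays off because memmap_init_zone() walks pfns in ascending
order, so consecutive lookups almost always land in the same region.
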
include/linux/arm96_common.h | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/include/linux/arm96_common.h b/include/linux/arm96_common.h
index 2f4dea4..bb86bd3 100644
--- a/include/linux/arm96_common.h
+++ b/include/linux/arm96_common.h
@@ -48,5 +48,29 @@ ulong __init_memblock memblock_next_valid_pfn(ulong pfn)
return PHYS_PFN(regions[early_region_idx].base);
}
EXPORT_SYMBOL(memblock_next_valid_pfn);
+
+int pfn_valid_region(ulong pfn)
+{
+	ulong start_pfn, end_pfn;
+	struct memblock_type *type = &memblock.memory;
+	struct memblock_region *regions = type->regions;
+
+	if (early_region_idx != -1) {
+		start_pfn = PFN_DOWN(regions[early_region_idx].base);
+		end_pfn = PFN_DOWN(regions[early_region_idx].base +
+				   regions[early_region_idx].size);
+
+		if (pfn >= start_pfn && pfn < end_pfn)
+			return !memblock_is_nomap(
+					&regions[early_region_idx]);
+	}
+
+	early_region_idx = memblock_search_pfn_regions(pfn);
+	if (early_region_idx == -1)
+		return false;
+
+	return !memblock_is_nomap(&regions[early_region_idx]);
+}
+EXPORT_SYMBOL(pfn_valid_region);
#endif /*CONFIG_HAVE_ARCH_PFN_VALID*/
#endif /*__ARM96_COMMON_H*/
--
2.7.4