Re: [PATCH v3 5/5] mm: page_alloc: reduce unnecessary binary search in early_pfn_valid()

From: Jia He
Date: Tue Mar 27 2018 - 22:10:57 EST

On 3/28/2018 1:51 AM, Daniel Vacek wrote:
On Mon, Mar 26, 2018 at 5:02 AM, Jia He <hejianet@xxxxxxxxx> wrote:
Commit b92df1de5d28 ("mm: page_alloc: skip over regions of invalid pfns
where possible") optimized the loop in memmap_init_zone(). But there is
still some room for improvement. E.g. in early_pfn_valid(), if pfn and
pfn+1 are in the same memblock region, we can record the index of the
last matched memblock region and check whether pfn+1 still falls in the
same region, instead of redoing the binary search from scratch.

Currently this only improves performance on arm64; it has no impact on
other arches.

Signed-off-by: Jia He <jia.he@xxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/mmzone_32.h |  2 +-
 include/linux/mmzone.h           | 12 +++++++++---
 mm/page_alloc.c                  |  2 +-
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 73d8dd1..329d3ba 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -49,7 +49,7 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
-#define early_pfn_valid(pfn)	pfn_valid((pfn))
+#define early_pfn_valid(pfn, last_region_idx)	pfn_valid((pfn))
 
 #endif /* CONFIG_DISCONTIGMEM */

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d797716..3a686af 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1267,9 +1267,15 @@ static inline int pfn_present(unsigned long pfn)
 })
 #else
 #define pfn_to_nid(pfn)		(0)
-#endif
+#endif /*CONFIG_NUMA*/
+
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define early_pfn_valid(pfn, last_region_idx) \
+	pfn_valid_region(pfn, last_region_idx)
+#else
+#define early_pfn_valid(pfn, last_region_idx) pfn_valid(pfn)
+#endif /*CONFIG_HAVE_ARCH_PFN_VALID*/
 
-#define early_pfn_valid(pfn)	pfn_valid(pfn)
 void sparse_init(void);
 #else
 #define sparse_init()	do {} while (0)
@@ -1288,7 +1294,7 @@ struct mminit_pfnnid_cache {
 };
 
 #ifndef early_pfn_valid
-#define early_pfn_valid(pfn)	(1)
+#define early_pfn_valid(pfn, last_region_idx)	(1)
 #endif
 
 void memory_present(int nid, unsigned long start, unsigned long end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0bb0274..debccf3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5484,7 +5484,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context != MEMMAP_EARLY)
 			goto not_early;
 
-		if (!early_pfn_valid(pfn)) {
+		if (!early_pfn_valid(pfn, &idx)) {
 #if (defined CONFIG_HAVE_MEMBLOCK) && (defined CONFIG_HAVE_ARCH_PFN_VALID)
 			/*
 			 * Skip to the pfn preceding the next valid one (or
--
2.7.4

Hmm, what about making the index a global variable instead of changing
all the prototypes? Similar to early_pfnnid_cache, for example.
Something like:

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int early_region_idx __meminitdata;
#define early_pfn_valid(pfn) \
	pfn_valid_region(pfn, &early_region_idx)
#else
#define early_pfn_valid(pfn) pfn_valid(pfn)
#endif /*CONFIG_HAVE_ARCH_PFN_VALID*/

And move this to arch/arm64/include/asm/page.h ?
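
Roughly, the definition side would then be (sketch, same names as
above; where exactly the variable lives is up for discussion):

int early_region_idx __meminitdata = -1;

and callers like memmap_init_zone() keep using early_pfn_valid(pfn)
as-is, so the x86 and fallback variants stay single-argument too.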

--nX

Yes, that's OK with me.

--
Cheers,
Jia