[PATCH v1 3/4] mm/memory_hotplug: Simplify online_pages_range()

From: David Hildenbrand
Date: Fri Aug 09 2019 - 08:57:44 EST

move_pfn_range_to_zone() will set all pages to PG_reserved via
memmap_init_zone(). The only way a page could no longer be reserved
is if a MEM_GOING_ONLINE notifier were to clear PG_reserved - which
is not done (the online_page callback is used for that purpose by
e.g., Hyper-V instead). walk_system_ram_range() will never call
online_pages_range() with duplicate PFNs, so drop the PageReserved() check.

Simplify the handling: onlined_pages always corresponds to nr_pages, so
there is no need for online_pages_blocks() anymore.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
mm/memory_hotplug.c | 42 ++++++++++++++++++------------------------
1 file changed, 18 insertions(+), 24 deletions(-)
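
Not part of the patch, just an illustration: a minimal userspace sketch of
the chunking logic used by the new online_pages_range() below. The names
DEMO_MAX_ORDER, demo_get_order() and demo_is_aligned() are made-up stand-ins
for the kernel's MAX_ORDER, get_order(PFN_PHYS(...)) and IS_ALIGNED(), and
the PFN range is an arbitrary example.

/* Standalone demo, not kernel code: walk a PFN range in the largest
 * aligned power-of-two chunks, like the loop in online_pages_range().
 */
#include <stdio.h>

#define DEMO_MAX_ORDER	11	/* assumed; common kernel default */

/* stand-in for get_order(PFN_PHYS(n)): smallest order covering n pages */
static int demo_get_order(unsigned long nr_pages)
{
	int order = 0;

	while ((1ul << order) < nr_pages)
		order++;
	return order;
}

/* stand-in for IS_ALIGNED() */
static int demo_is_aligned(unsigned long pfn, unsigned long size)
{
	return (pfn & (size - 1)) == 0;
}

int main(void)
{
	const unsigned long start_pfn = 0x100, nr_pages = 768;
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	int order;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
		order = demo_get_order(end_pfn - pfn);
		if (order > DEMO_MAX_ORDER - 1)
			order = DEMO_MAX_ORDER - 1;
		/* make sure the PFN is aligned and we don't exceed the range */
		while (!demo_is_aligned(pfn, 1ul << order) ||
		       (1ul << order) > end_pfn - pfn)
			order--;
		/* the kernel calls (*online_page_callback)(page, order) here */
		printf("pfn %#lx, order %d (%lu pages)\n",
		       pfn, order, 1ul << order);
	}
	return 0;
}

For these example values the loop emits an order-8 chunk at pfn 0x100 and an
order-9 chunk at pfn 0x200, i.e. exactly nr_pages pages in total, which is
why the onlined_pages accounting can simply add nr_pages.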

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2abd938c8c45..87f85597a19e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -632,37 +632,31 @@ static void generic_online_page(struct page *page, unsigned int order)
 #endif
 }
 
-static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
+static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
+			      void *arg)
 {
-	unsigned long end = start + nr_pages;
-	int order, onlined_pages = 0;
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn;
+	int order;
 
-	while (start < end) {
-		order = min(MAX_ORDER - 1,
-			    get_order(PFN_PHYS(end) - PFN_PHYS(start)));
+	/*
+	 * Online the pages. The callback might decide to keep some pages
+	 * PG_reserved (to add them to the buddy later), but we still account
+	 * them as being online/belonging to this zone ("present").
+	 */
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
+		order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
 		/* make sure the PFN is aligned and we don't exceed the range */
-		while (!IS_ALIGNED(start, 1ul << order) ||
-		       (1ul << order) > end - start)
+		while (!IS_ALIGNED(pfn, 1ul << order) ||
+		       (1ul << order) > end_pfn - pfn)
 			order--;
-		(*online_page_callback)(pfn_to_page(start), order);
-
-		onlined_pages += (1UL << order);
-		start += (1UL << order);
+		(*online_page_callback)(pfn_to_page(pfn), order);
 	}
-	return onlined_pages;
-}
-
-static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
-			      void *arg)
-{
-	unsigned long onlined_pages = *(unsigned long *)arg;
-
-	if (PageReserved(pfn_to_page(start_pfn)))
-		onlined_pages += online_pages_blocks(start_pfn, nr_pages);
 
-	online_mem_sections(start_pfn, start_pfn + nr_pages);
+	/* mark all involved sections as online */
+	online_mem_sections(start_pfn, end_pfn);
 
-	*(unsigned long *)arg = onlined_pages;
+	*(unsigned long *)arg += nr_pages;
 	return 0;
 }
 
--
2.21.0