[PATCH 2/3] x86, memhotplug: reserve memory from bootmem allocator for memory hotplug
From: Jiang Liu
Date: Sun Dec 02 2012 - 08:26:21 EST
There's no mechanism to migrate pages allocated from bootmem allocator,
thus a memory device may become irremovable if bootmem allocates any
pages from it.
This patch introduces a mechanism to
1) reserve memory from the bootmem allocator for hotplug early enough
during boot.
2) free the reserved memory into the buddy system later, when the memory
hotplug infrastructure has been initialized.
Signed-off-by: Jiang Liu <jiang.liu@xxxxxxxxxx>
---
arch/x86/kernel/setup.c | 11 ++++++++
arch/x86/mm/init.c | 56 ++++++++++++++++++++++++++++++++++++++++
arch/x86/mm/init_32.c | 2 ++
arch/x86/mm/init_64.c | 2 ++
include/linux/memblock.h | 1 +
include/linux/memory_hotplug.h | 5 ++++
mm/Kconfig | 1 +
7 files changed, 78 insertions(+)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ca45696..93f6f10 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -940,6 +940,17 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = max_pfn;
}
#endif
+
+ /*
+ * Try to reserve memory from bootmem allocator for memory hotplug
+ * before updating memblock.current_limit to cover all low memory.
+ * Until now memblock.current_limit is still set to the initial value
+ * of max_pfn_mapped, which is 512M on x86_64 (TODO: fill in i386 value). And
+ * memblock allocates available memory in reverse order, so we almost
+ * have no chance to reserve memory below 512M for memory hotplug.
+ */
+ reserve_memory_for_hotplug();
+
memblock.current_limit = get_max_mapped();
dma_contiguous_reserve(0);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d7aea41..36bb5c2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -424,3 +424,59 @@ void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static int __init reserve_bootmem_for_hotplug(phys_addr_t base,
+ phys_addr_t size)
+{
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0)
+ return -EBUSY;
+
+ BUG_ON(memblock_mark_tag(base, size, MEMBLOCK_TAG_HOTPLUG, NULL));
+
+ return 0;
+}
+
+/*
+ * Try to reserve low memory for hotplug according to user configured
+ * movablecore_map. Movable zone hasn't been determined yet, so can't rely
+ * on zone_movable_is_highmem() but to reserve all low memory configured by
+ * movablecore_map parameter.
+ * Assume entries in movablecore_map.map are sorted in increasing order.
+ */
+static int __init reserve_hotplug_memory_from_movable_map(void)
+{
+ int i;
+ phys_addr_t start, end;
+ struct movablecore_entry *ep;
+
+ if (movablecore_map.nr_map == 0)
+ return 0;
+
+ for (i = 0; i < movablecore_map.nr_map; i++) {
+ ep = &movablecore_map.map[i];
+ start = ep->start << PAGE_SHIFT;
+ end = (min(ep->end, max_low_pfn) + 1) << PAGE_SHIFT;
+ if (end <= start)
+ break;
+
+ if (reserve_bootmem_for_hotplug(start, end - start))
+ pr_warn("mm: failed to reserve lowmem [%#016llx-%#016llx] for hotplug.",
+ (unsigned long long)start,
+ (unsigned long long)end - 1);
+ }
+
+ return 1;
+}
+
+void __init reserve_memory_for_hotplug(void)
+{
+ if (reserve_hotplug_memory_from_movable_map())
+ return;
+}
+
+void __init free_memory_reserved_for_hotplug(void)
+{
+ memblock_free_all_with_tag(MEMBLOCK_TAG_HOTPLUG);
+}
+#endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 11a5800..815700a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -745,6 +745,8 @@ void __init mem_init(void)
*/
set_highmem_pages_init();
+ free_memory_reserved_for_hotplug();
+
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3baff25..1a92fd6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -695,6 +695,8 @@ void __init mem_init(void)
reservedpages = 0;
+ free_memory_reserved_for_hotplug();
+
/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
totalram_pages = numa_free_all_bootmem();
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 40dea53..5420ed9 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -124,6 +124,7 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
#ifdef CONFIG_HAVE_MEMBLOCK_TAG
#define MEMBLOCK_TAG_DEFAULT 0x0 /* default tag for bootmem allocatror */
+#define MEMBLOCK_TAG_HOTPLUG 0x1 /* reserved for memory hotplug */
int memblock_mark_tag(phys_addr_t base, phys_addr_t size, int tag, void *data);
void memblock_free_all_with_tag(int tag);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 95573ec..edf183d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -222,6 +222,8 @@ static inline void unlock_memory_hotplug(void) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern void reserve_memory_for_hotplug(void);
+extern void free_memory_reserved_for_hotplug(void);
#else
static inline int is_mem_section_removable(unsigned long pfn,
@@ -229,6 +231,9 @@ static inline int is_mem_section_removable(unsigned long pfn,
{
return 0;
}
+
+static inline void reserve_memory_for_hotplug(void) {}
+static inline void free_memory_reserved_for_hotplug(void) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern int mem_online_node(int nid);
diff --git a/mm/Kconfig b/mm/Kconfig
index 5080390..9d69e5d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -160,6 +160,7 @@ config MEMORY_HOTPLUG_SPARSE
config MEMORY_HOTREMOVE
bool "Allow for memory hot remove"
+ select HAVE_MEMBLOCK_TAG
depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
depends on MIGRATION
--
1.7.9.5
--------------060103020608020905070302
Content-Type: text/x-patch;
name="0003-CMA-use-new-memblock-interfaces-to-simplify-implemen.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
filename*0="0003-CMA-use-new-memblock-interfaces-to-simplify-implemen.pa";
filename*1="tch"