From: Hui Zhu <teawater@xxxxxxxxxx>
We did some virtio-mem resize tests in a high memory pressure
environment. Memory increased slowly and sometimes failed to increase
in these tests.
Here is a way to reproduce the issue:
Start QEMU with a small amount of memory (132 MB) and resize the
virtio-mem device to hotplug memory.
You will then get the following error:
[ 8.097461] virtio_mem virtio0: requested size: 0x10000000
[ 8.098038] virtio_mem virtio0: plugging memory: 0x100000000 - 0x107ffffff
[ 8.098829] virtio_mem virtio0: adding memory: 0x100000000 - 0x107ffffff
[ 8.106298] kworker/0:1: vmemmap alloc failure: order:9, mode:0x4cc0(GFP_KERNEL|__GFP_RETRY_MAYFAIL), nodemask=(null),cpuset=/,mems_allowed=0
[ 8.107609] CPU: 0 PID: 14 Comm: kworker/0:1 Not tainted 5.13.0-rc7+
[ 8.108295] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[ 8.109476] Workqueue: events_freezable virtio_mem_run_wq
[ 8.110039] Call Trace:
[ 8.110305] dump_stack+0x76/0x94
[ 8.110654] warn_alloc.cold+0x7b/0xdf
[ 8.111054] ? __alloc_pages+0x2c2/0x310
[ 8.111462] vmemmap_alloc_block+0x86/0xdc
[ 8.111891] vmemmap_populate+0xfc/0x325
[ 8.112309] __populate_section_memmap+0x38/0x4e
[ 8.112787] sparse_add_section+0x167/0x244
[ 8.113226] __add_pages+0xa6/0x130
[ 8.113592] add_pages+0x12/0x60
[ 8.113934] add_memory_resource+0x114/0x2d0
[ 8.114377] add_memory_driver_managed+0x7c/0xc0
[ 8.114852] virtio_mem_add_memory+0x57/0xe0
[ 8.115304] virtio_mem_sbm_plug_and_add_mb+0x9a/0x130
[ 8.115833] virtio_mem_run_wq+0x9d5/0x1100
I think allocating 2 MiB of contiguous memory (an order-9 allocation)
can be slow and may fail in some cases, especially in a high memory
pressure environment.
This commit tries to add support for memory_hotplug.memmap_on_memory to
handle this issue.
Only SBM mode gets this support, because memory_hotplug.memmap_on_memory
requires the hot-added range to be a single memory block.
Add nr_vmemmap_pages and sbs_vmemmap to struct sbm.
If memory_hotplug.memmap_on_memory is enabled, the number of pages used
for a memory block's internal metadata (its struct pages) is stored in
nr_vmemmap_pages.
sbs_vmemmap is the number of vmemmap subblocks per Linux memory block.
The vmemmap subblocks can hold more pages than nr_vmemmap_pages because
sb_size needs to span at least MAX_ORDER_NR_PAGES and
pageblock_nr_pages pages (see virtio_mem_init).
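To make the sizing concrete, here is a minimal userspace sketch of the
arithmetic (illustration only, not driver code; the 4 KiB page size,
64-byte struct page, 128 MiB memory block and 4 MiB subblock size are
assumed example values):

#include <stdio.h>

/* Assumed example values, not taken from the driver. */
#define PAGE_SZ          4096UL
#define STRUCT_PAGE_SZ   64UL
#define MEMORY_BLOCK_SZ  (128UL << 20)  /* 128 MiB memory block */
#define SB_SZ            (4UL << 20)    /* 4 MiB subblock (sb_size) */

int main(void)
{
        /* Pages per memory block and bytes of struct-page metadata. */
        unsigned long block_pages = MEMORY_BLOCK_SZ / PAGE_SZ;
        unsigned long memmap_bytes = block_pages * STRUCT_PAGE_SZ;

        /* nr_vmemmap_pages: metadata size expressed in whole pages. */
        unsigned long nr_vmemmap_pages = memmap_bytes / PAGE_SZ;

        /* sbs_vmemmap: subblocks needed to cover the metadata, rounded up. */
        unsigned long sb_pages = SB_SZ / PAGE_SZ;
        unsigned long sbs_vmemmap = (nr_vmemmap_pages + sb_pages - 1) / sb_pages;

        /* With these values: 512 metadata pages, 1 subblock of 1024 pages. */
        printf("nr_vmemmap_pages = %lu\n", nr_vmemmap_pages);
        printf("sbs_vmemmap = %lu subblock(s) = %lu pages\n",
               sbs_vmemmap, sbs_vmemmap * sb_pages);
        return 0;
}

With these example numbers, one 4 MiB vmemmap subblock (1024 pages)
covers the 512 pages of metadata, which is why the vmemmap subblocks can
span more pages than nr_vmemmap_pages.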
None of the pages in the vmemmap subblocks are added to the buddy, not
even the pages that are not used to store the internal metadata
(struct pages), because such pages would not work reliably with
alloc_contig_range().
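As an illustration of that rule, a hypothetical helper (not part of this
patch, name assumed) could express the check like this:

/*
 * Hypothetical helper, for illustration only: with
 * memory_hotplug.memmap_on_memory, the first sbs_vmemmap subblocks of a
 * memory block back the vmemmap. No page in this range is handed to the
 * buddy, not even the pages beyond nr_vmemmap_pages that hold no
 * metadata, because alloc_contig_range() would not handle them reliably.
 */
static bool virtio_mem_sbm_sb_is_vmemmap(struct virtio_mem *vm, int sb_id)
{
        return sb_id < vm->sbm.sbs_vmemmap;
}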
When resizing virtio-mem, sbs_vmemmap is accounted for in
virtio_mem_sbm_plug_and_add_mb, virtio_mem_sbm_unplug_any_sb_offline
and virtio_mem_sbm_unplug_any_sb_online.
Because the internal metadata also needs real pages on the host to back
it, I think it is better for the virtio-mem size to match the actual
memory footprint on the host, for example when setting up a memory
cgroup for QEMU.
@@ -1534,12 +1615,15 @@ static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
                                           unsigned long mb_id, uint64_t *nb_sb)
 {
-        const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
+        int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
         int rc;
 
         if (WARN_ON_ONCE(!count))
                 return -EINVAL;
 
+        if (vm->sbm.sbs_vmemmap)
+                count = max_t(int, count, vm->sbm.sbs_vmemmap);
+
         /*
          * Remove the block from Linux - this should never fail.
          * Hinder the block from getting onlined by marking it
@@ -1840,6 +1934,23 @@ static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
                 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
                 BUG_ON(rc);
                 mutex_lock(&vm->hotplug_mutex);
+
+                /* Remove vmemmap pages. */
+                if (vm->sbm.sbs_vmemmap) {
+                        rc = virtio_mem_sbm_unplug_sb(vm, mb_id, 0,
+                                                      vm->sbm.sbs_vmemmap);
+                        /*
+                         * Just warn because this error will
+                         * not affect the next plug.
+                         */
+                        WARN_ON(rc);
+                        if (!rc) {
+                                if (*nb_sb >= vm->sbm.sbs_vmemmap)
+                                        *nb_sb -= vm->sbm.sbs_vmemmap;
+                                else
+                                        *nb_sb = 0;
+                        }
+                }
         }
         return 0;
 }
@@ ... @@ static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
-        if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
+        if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, vm->sbm.sbs_vmemmap,
+                                             vm->sbm.sbs_per_mb - vm->sbm.sbs_vmemmap)) {
                 mutex_unlock(&vm->hotplug_mutex);
                 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
                 mutex_lock(&vm->hotplug_mutex);
-                if (!rc)
+                if (!rc) {
                         virtio_mem_sbm_set_mb_state(vm, mb_id,
                                                     VIRTIO_MEM_SBM_MB_UNUSED);
+                        /* Remove vmemmap pages. */
+                        if (vm->sbm.sbs_vmemmap) {
+                                rc = virtio_mem_sbm_unplug_sb(vm, mb_id, 0,
+                                                              vm->sbm.sbs_vmemmap);
+                                /*
+                                 * Just warn because this error will
+                                 * not affect the next plug.
+                                 */
+                                WARN_ON(rc);
+                                if (!rc) {
+                                        if (*nb_sb >= vm->sbm.sbs_vmemmap)
+                                                *nb_sb -= vm->sbm.sbs_vmemmap;
+                                        else
+                                                *nb_sb = 0;
+                                }
+                        }
+                }
         }
         return 0;
@@ -2444,6 +2578,15 @@ static int virtio_mem_init(struct virtio_mem *vm)
                        memory_block_size_bytes() - 1;
         vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
         vm->sbm.next_mb_id = vm->sbm.first_mb_id;
+        if (mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+                vm->sbm.nr_vmemmap_pages
+                        = PFN_DOWN(PFN_DOWN(memory_block_size_bytes()) *
+                                   sizeof(struct page));