[PATCH v3] mm/vmalloc: fix corrupted list of vbq->free

From: hailong.liu
Date: Sat Jun 01 2024 - 13:14:07 EST


From: "Hailong.Liu" <hailong.liu@xxxxxxxx>

In _vm_unmap_aliases(), xa_for_each() loops through all vbs. However,
since commit 062eacf57ad9 ("mm: vmalloc: remove a global vmap_blocks
xarray"), the xarray a vb is stored in is selected by hashing the block's
address, so a vb found in one CPU's xarray may not be on that CPU's
vmap_block_queue free list. Consequently, purge_fragmented_block() may
take the wrong vbq->lock while updating the free list, corrupting
vbq->free.
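
For reference, the pre-fix iteration in _vm_unmap_aliases() looks roughly
like this (a condensed sketch, not verbatim kernel code):

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;
		unsigned long idx;

		rcu_read_lock();
		xa_for_each(&vbq->vmap_blocks, idx, vb) {
			spin_lock(&vb->lock);
			/*
			 * vb was found via this CPU's xarray (selected by
			 * hashing the block address), but its free_list entry
			 * may sit on another CPU's vbq->free, so the vbq
			 * passed here can be the wrong one.
			 */
			purge_fragmented_block(vb, vbq, &purge_list, false);
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}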

Incorrect lock protection can exhaust all vmalloc space as follows:
CPU0                                            CPU1

+--------------------------------------------+
|    +--------------------+       +-----+    |
+--> |                    |------>|     |----+
     | CPU1:vbq free_list |       | vb1 |
+--- |                    |<------|     |<---+
|    +--------------------+       +-----+    |
+--------------------------------------------+

_vm_unmap_aliases()                             vb_alloc()
                                                    new_vmap_block()
    xa_for_each(&vbq->vmap_blocks, idx, vb)
    --> vb in CPU1:vbq->freelist

    purge_fragmented_block(vb)
        spin_lock(&vbq->lock)                   spin_lock(&vbq->lock)
        --> use CPU0:vbq->lock                  --> use CPU1:vbq->lock

        list_del_rcu(&vb->free_list)            list_add_tail_rcu(&vb->free_list, &vbq->free)
            __list_del(vb->prev, vb->next)
                next->prev = prev

    +--------------------+
    |                    |
    | CPU1:vbq free_list |
+---|                    |<--+
|   +--------------------+   |
+----------------------------+

                                                __list_add(new, head->prev, head)

+--------------------------------------------+
|    +--------------------+       +-----+    |
+--> |                    |------>|     |----+
     | CPU1:vbq free_list |       | vb2 |
+--- |                    |<------|     |<---+
|    +--------------------+       +-----+    |
+--------------------------------------------+

                prev->next = next

+--------------------------------------------+
|----------------------------+               |
|    +--------------------+  |    +-----+    |
+--> |                    |--+    |     |----+
     | CPU1:vbq free_list |       | vb2 |
+--- |                    |<------|     |<---+
|    +--------------------+       +-----+    |
+--------------------------------------------+
The list ends up broken: all vbs that were linked in after 'prev' can no
longer be reached by list_for_each_entry_rcu(vb, &vbq->free, free_list) in
vb_alloc(), so every allocation falls through to new_vmap_block() and
vmalloc space is eventually exhausted.
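
For context, vb_alloc() only ever looks at the per-CPU free list when reusing
an existing block (condensed sketch, not verbatim):

	rcu_read_lock();
	vbq = raw_cpu_ptr(&vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		/* try to carve (1UL << order) pages out of this block */
		...
	}
	rcu_read_unlock();

	/* nothing usable found on the (broken) list: allocate a new block */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

Once the list is corrupted, the orphaned blocks are never revisited here, so
every miss burns another block's worth of vmap space.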

This issue affects both erofs and f2fs; the stack traces are as follows:
erofs:
[<ffffffd4ffb93ad4>] __switch_to+0x174
[<ffffffd4ffb942f0>] __schedule+0x624
[<ffffffd4ffb946f4>] schedule+0x7c
[<ffffffd4ffb947cc>] schedule_preempt_disabled+0x24
[<ffffffd4ffb962ec>] __mutex_lock+0x374
[<ffffffd4ffb95998>] __mutex_lock_slowpath+0x14
[<ffffffd4ffb95954>] mutex_lock+0x24
[<ffffffd4fef2900c>] reclaim_and_purge_vmap_areas+0x44
[<ffffffd4fef25908>] alloc_vmap_area+0x2e0
[<ffffffd4fef24ea0>] vm_map_ram+0x1b0
[<ffffffd4ff1b46f4>] z_erofs_lz4_decompress+0x278
[<ffffffd4ff1b8ac4>] z_erofs_decompress_queue+0x650
[<ffffffd4ff1b8328>] z_erofs_runqueue+0x7f4
[<ffffffd4ff1b66a8>] z_erofs_read_folio+0x104
[<ffffffd4feeb6fec>] filemap_read_folio+0x6c
[<ffffffd4feeb68c4>] filemap_fault+0x300
[<ffffffd4fef0ecac>] __do_fault+0xc8
[<ffffffd4fef0c908>] handle_mm_fault+0xb38
[<ffffffd4ffb9f008>] do_page_fault+0x288
[<ffffffd4ffb9ed64>] do_translation_fault[jt]+0x40
[<ffffffd4fec39c78>] do_mem_abort+0x58
[<ffffffd4ffb8c3e4>] el0_ia+0x70
[<ffffffd4ffb8c260>] el0t_64_sync_handler[jt]+0xb0
[<ffffffd4fec11588>] ret_to_user[jt]+0x0

f2fs:
[<ffffffd4ffb93ad4>] __switch_to+0x174
[<ffffffd4ffb942f0>] __schedule+0x624
[<ffffffd4ffb946f4>] schedule+0x7c
[<ffffffd4ffb947cc>] schedule_preempt_disabled+0x24
[<ffffffd4ffb962ec>] __mutex_lock+0x374
[<ffffffd4ffb95998>] __mutex_lock_slowpath+0x14
[<ffffffd4ffb95954>] mutex_lock+0x24
[<ffffffd4fef2900c>] reclaim_and_purge_vmap_areas+0x44
[<ffffffd4fef25908>] alloc_vmap_area+0x2e0
[<ffffffd4fef24ea0>] vm_map_ram+0x1b0
[<ffffffd4ff1a3b60>] f2fs_prepare_decomp_mem+0x144
[<ffffffd4ff1a6c24>] f2fs_alloc_dic+0x264
[<ffffffd4ff175468>] f2fs_read_multi_pages+0x428
[<ffffffd4ff17b46c>] f2fs_mpage_readpages+0x314
[<ffffffd4ff1785c4>] f2fs_readahead+0x50
[<ffffffd4feec3384>] read_pages+0x80
[<ffffffd4feec32c0>] page_cache_ra_unbounded+0x1a0
[<ffffffd4feec39e8>] page_cache_ra_order+0x274
[<ffffffd4feeb6cec>] do_sync_mmap_readahead+0x11c
[<ffffffd4feeb6764>] filemap_fault+0x1a0
[<ffffffd4ff1423bc>] f2fs_filemap_fault+0x28
[<ffffffd4fef0ecac>] __do_fault+0xc8
[<ffffffd4fef0c908>] handle_mm_fault+0xb38
[<ffffffd4ffb9f008>] do_page_fault+0x288
[<ffffffd4ffb9ed64>] do_translation_fault[jt]+0x40
[<ffffffd4fec39c78>] do_mem_abort+0x58
[<ffffffd4ffb8c3e4>] el0_ia+0x70
[<ffffffd4ffb8c260>] el0t_64_sync_handler[jt]+0xb0
[<ffffffd4fec11588>] ret_to_user[jt]+0x0

To fix this, store a pointer to the owning queue's lock (vbq_lock) in
struct vmap_block when the block is added to a free list, and make
purge_fragmented_block() take that lock instead of the caller-supplied
vbq->lock.

Cc: <stable@xxxxxxxxxxxxxxx>
Fixes: fc1e0d980037 ("mm/vmalloc: prevent stale TLBs in fully utilized blocks")
Suggested-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>
Signed-off-by: Hailong.Liu <hailong.liu@xxxxxxxx>
---
mm/vmalloc.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 125427cbdb87..f4cfdf3fd925 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2467,6 +2467,7 @@ struct vmap_block_queue {
 
 struct vmap_block {
         spinlock_t lock;
+        spinlock_t *vbq_lock;
         struct vmap_area *va;
         unsigned long free, dirty;
         DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
@@ -2603,6 +2604,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
         }
 
         vbq = raw_cpu_ptr(&vmap_block_queue);
+        vb->vbq_lock = &vbq->lock;
         spin_lock(&vbq->lock);
         list_add_tail_rcu(&vb->free_list, &vbq->free);
         spin_unlock(&vbq->lock);
@@ -2630,7 +2632,7 @@ static void free_vmap_block(struct vmap_block *vb)
 }
 
 static bool purge_fragmented_block(struct vmap_block *vb,
-                struct vmap_block_queue *vbq, struct list_head *purge_list,
+                struct list_head *purge_list,
                 bool force_purge)
 {
         if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
@@ -2647,9 +2649,9 @@ static bool purge_fragmented_block(struct vmap_block *vb,
         WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
         vb->dirty_min = 0;
         vb->dirty_max = VMAP_BBMAP_BITS;
-        spin_lock(&vbq->lock);
+        spin_lock(vb->vbq_lock);
         list_del_rcu(&vb->free_list);
-        spin_unlock(&vbq->lock);
+        spin_unlock(vb->vbq_lock);
         list_add_tail(&vb->purge, purge_list);
         return true;
 }
@@ -2680,7 +2682,7 @@ static void purge_fragmented_blocks(int cpu)
                         continue;
 
                 spin_lock(&vb->lock);
-                purge_fragmented_block(vb, vbq, &purge, true);
+                purge_fragmented_block(vb, &purge, true);
                 spin_unlock(&vb->lock);
         }
         rcu_read_unlock();
@@ -2817,7 +2819,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
                          * not purgeable, check whether there is dirty
                          * space to be flushed.
                          */
-                        if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
+                        if (!purge_fragmented_block(vb, &purge_list, false) &&
                             vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
                                 unsigned long va_start = vb->va->va_start;
                                 unsigned long s, e;
---
Changes since v2 [2]:
- simply reverting the patch would have other side effects, per Zhaoyang

Changes since v1 [1]:
- add the runtime effect to the commit message, per Andrew.

BTW,

1. When using xa_for_each() to iterate over all vbs, we need a mapping from a
vb back to its vbq. Zhaoyang uses the CPU id to look up the vbq, as follows:
https://lore.kernel.org/all/20240531030520.1615833-1-zhaoyang.huang@xxxxxxxxxx/
IMO, simply storing the address of the lock wastes a few more bytes but looks
clearer (a rough sketch follows below).
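
A rough sketch of the cpuid-based alternative; the field name and placement
are assumptions for illustration, not the actual patch:

	struct vmap_block {
		spinlock_t lock;
		struct vmap_area *va;
		unsigned int cpu;	/* CPU whose vbq->free holds this block */
		...
	};

	/* new_vmap_block(): remember which queue the block went onto */
	vb->cpu = raw_smp_processor_id();

	/* purge path: look the queue up again instead of trusting the caller */
	vbq = &per_cpu(vmap_block_queue, vb->cpu);
	spin_lock(&vbq->lock);
	list_del_rcu(&vb->free_list);
	spin_unlock(&vbq->lock);

Either way the point is the same: the lock taken must belong to the queue the
block is actually queued on.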

Baoquan and Hillf instead add the block directly to the queue corresponding
to the va, e.g.:

-        vbq = raw_cpu_ptr(&vmap_block_queue);
+        vbq = container_of(xa, struct vmap_block_queue, vmap_blocks);
         spin_lock(&vbq->lock);

The same idea can be applied in new_vmap_block() (from the same discussion;
see the lore link below):

@@ -2626,7 +2634,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
                 return ERR_PTR(err);
         }
 
-        vbq = raw_cpu_ptr(&vmap_block_queue);
+        vbq = addr_to_vbq(va->va_start);

In that case we actually don't need the per-CPU lookup at all, because each
address maps directly to the corresponding queue index through a hash
function (roughly as sketched below).
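
For illustration, the address-to-queue mapping could reuse the hash already
used by addr_to_vb_xa() (a sketch under that assumption, not the quoted
proposal itself):

	static struct vmap_block_queue *addr_to_vbq(unsigned long addr)
	{
		/* same hashing idea as addr_to_vb_xa() */
		int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();

		return &per_cpu(vmap_block_queue, index);
	}

With this, a block is queued on, and later purged from, the vbq derived from
its address, so the lookup and the lock can never disagree.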

Does anyone have a better suggestion?

https://lore.kernel.org/all/ZlqIp9V1Jknm7axa@MiWiFi-R3L-srv/

[1] https://lore.kernel.org/all/20240530093108.4512-1-hailong.liu@xxxxxxxx/
[2] https://lore.kernel.org/all/20240531024820.5507-1-hailong.liu@xxxxxxxx/
--
2.34.1