Re: [PATCH v2 2/6] mm/zswap: reuse dstmem when decompress

From: Yosry Ahmed
Date: Mon Dec 18 2023 - 04:25:13 EST


On Mon, Dec 18, 2023 at 12:22 AM Chengming Zhou
<zhouchengming@xxxxxxxxxxxxx> wrote:
>
> In the !zpool_can_sleep_mapped() case, such as zsmalloc, we first have
> to copy the entry->handle memory into a temporary buffer, which is
> allocated with kmalloc on every decompression.
>
> We can instead reuse the per-compressor dstmem and avoid that
> allocation, since dstmem is per-CPU per compressor and is protected by
> the per-CPU acomp_ctx mutex.
>
> Reviewed-by: Nhat Pham <nphamcs@xxxxxxxxx>
> Acked-by: Chris Li <chrisl@xxxxxxxxxx>
> Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>

Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
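
For anyone following along, the invariant the patch relies on can be
shown with a minimal userspace sketch (illustrative only, not kernel
code; the names scratch_ctx/dstmem below are made up to mirror
acomp_ctx): the scratch buffer is preallocated once per compressor
context, and holding the context mutex across the whole operation is
what makes the per-call kmalloc() unnecessary.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* One scratch buffer per compressor context, like acomp_ctx->dstmem. */
struct scratch_ctx {
        pthread_mutex_t lock;   /* stands in for acomp_ctx->mutex */
        unsigned char *dstmem;  /* preallocated once, PAGE_SIZE bytes */
};

static int scratch_ctx_init(struct scratch_ctx *ctx)
{
        ctx->dstmem = malloc(PAGE_SIZE);
        if (!ctx->dstmem)
                return -1;
        return pthread_mutex_init(&ctx->lock, NULL);
}

/*
 * Decompress path: take the lock *before* staging the source into
 * dstmem (mirroring how the patch moves mutex_lock() ahead of
 * zpool_map_handle()), so a concurrent compress/decompress on the
 * same context cannot clobber the buffer.
 */
static void decompress_via_scratch(struct scratch_ctx *ctx,
                                   const unsigned char *src, size_t len)
{
        pthread_mutex_lock(&ctx->lock);
        memcpy(ctx->dstmem, src, len);
        /* ... feed ctx->dstmem to the decompressor here ... */
        pthread_mutex_unlock(&ctx->lock);
}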

> ---
>  mm/zswap.c | 44 ++++++++++++--------------------------------
>  1 file changed, 12 insertions(+), 32 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 976f278aa507..6b872744e962 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1417,19 +1417,13 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>          struct crypto_acomp_ctx *acomp_ctx;
>          struct zpool *pool = zswap_find_zpool(entry);
>          bool page_was_allocated;
> -        u8 *src, *tmp = NULL;
> +        u8 *src;
>          unsigned int dlen;
>          int ret;
>          struct writeback_control wbc = {
>                  .sync_mode = WB_SYNC_NONE,
>          };
>
> -        if (!zpool_can_sleep_mapped(pool)) {
> -                tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -                if (!tmp)
> -                        return -ENOMEM;
> -        }
> -
>          /* try to allocate swap cache page */
>          mpol = get_task_policy(current);
>          page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
> @@ -1465,15 +1459,15 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>          /* decompress */
>          acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
>          dlen = PAGE_SIZE;
> +        mutex_lock(acomp_ctx->mutex);
>
>          src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
>          if (!zpool_can_sleep_mapped(pool)) {
> -                memcpy(tmp, src, entry->length);
> -                src = tmp;
> +                memcpy(acomp_ctx->dstmem, src, entry->length);
> +                src = acomp_ctx->dstmem;
>                  zpool_unmap_handle(pool, entry->handle);
>          }
>
> -        mutex_lock(acomp_ctx->mutex);
>          sg_init_one(&input, src, entry->length);
>          sg_init_table(&output, 1);
>          sg_set_page(&output, page, PAGE_SIZE, 0);
> @@ -1482,9 +1476,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>          dlen = acomp_ctx->req->dlen;
>          mutex_unlock(acomp_ctx->mutex);
>
> -        if (!zpool_can_sleep_mapped(pool))
> -                kfree(tmp);
> -        else
> +        if (zpool_can_sleep_mapped(pool))
>                  zpool_unmap_handle(pool, entry->handle);
>
>          BUG_ON(ret);
> @@ -1508,9 +1500,6 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>          return ret;
>
>  fail:
> -        if (!zpool_can_sleep_mapped(pool))
> -                kfree(tmp);
> -
>          /*
>           * If we get here because the page is already in swapcache, a
>           * load may be happening concurrently. It is safe and okay to
> @@ -1771,7 +1760,7 @@ bool zswap_load(struct folio *folio)
>          struct zswap_entry *entry;
>          struct scatterlist input, output;
>          struct crypto_acomp_ctx *acomp_ctx;
> -        u8 *src, *dst, *tmp;
> +        u8 *src, *dst;
>          struct zpool *zpool;
>          unsigned int dlen;
>          bool ret;
> @@ -1796,26 +1785,19 @@ bool zswap_load(struct folio *folio)
>          }
>
>          zpool = zswap_find_zpool(entry);
> -        if (!zpool_can_sleep_mapped(zpool)) {
> -                tmp = kmalloc(entry->length, GFP_KERNEL);
> -                if (!tmp) {
> -                        ret = false;
> -                        goto freeentry;
> -                }
> -        }
>
>          /* decompress */
>          dlen = PAGE_SIZE;
> -        src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
> +        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> +        mutex_lock(acomp_ctx->mutex);
>
> +        src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
>          if (!zpool_can_sleep_mapped(zpool)) {
> -                memcpy(tmp, src, entry->length);
> -                src = tmp;
> +                memcpy(acomp_ctx->dstmem, src, entry->length);
> +                src = acomp_ctx->dstmem;
>                  zpool_unmap_handle(zpool, entry->handle);
>          }
>
> -        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> -        mutex_lock(acomp_ctx->mutex);
>          sg_init_one(&input, src, entry->length);
>          sg_init_table(&output, 1);
>          sg_set_page(&output, page, PAGE_SIZE, 0);
> @@ -1826,15 +1808,13 @@ bool zswap_load(struct folio *folio)
>
>          if (zpool_can_sleep_mapped(zpool))
>                  zpool_unmap_handle(zpool, entry->handle);
> -        else
> -                kfree(tmp);
>
>          ret = true;
>  stats:
>          count_vm_event(ZSWPIN);
>          if (entry->objcg)
>                  count_objcg_event(entry->objcg, ZSWPIN);
> -freeentry:
> +
>          spin_lock(&tree->lock);
>          if (ret && zswap_exclusive_loads_enabled) {
>                  zswap_invalidate_entry(tree, entry);
>
> --
> b4 0.10.1