Re: [PATCH v9 6/7] mm: zswap: Support large folios in zswap_store().

From: Yosry Ahmed
Date: Tue Oct 01 2024 - 02:01:43 EST


[..]
> > >  store_failed:
> > >  	zpool_free(entry->pool->zpool, entry->handle);
> > > -put_pool:
> > > -	zswap_pool_put(entry->pool);
> > > -freepage:
> > > +put_pool_objcg:
> > > +	zswap_pool_put(pool);
> > > +	obj_cgroup_put(objcg);
> >
> > I think if we reorder the function we can drop these calls, position
> > the comments a bit better, and centralize the entry
> > initializations. I am also not a fan of passing a semi-initialized
> > entry to zswap_compress() just to get the pool pointer.
> >
> > Does the following diff improve things or did I miss something?
>
> We shouldn’t be adding the entry to the xarray before initializing its pool
> and objcg, right? Please let me know if I am misunderstanding what you're
> proposing in the diff.

It should be safe. We already initialize entry->lru after we insert
the entry in the tree; see the comment above the call to
zswap_lru_add(). Basically, we are protected against concurrent
stores/loads by the folio lock, and against writeback because the
entry is not on the LRU yet.
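
Roughly, the window I am talking about looks like this (a sketch of
the ordering, not the exact code):

	/* entry is in the tree, but not fully initialized yet */
	old = xa_store(tree, swp_offset(page_swap_entry(page)), entry, GFP_KERNEL);

	/*
	 * Safe: concurrent stores/loads of this swap entry are excluded
	 * by the folio lock, and writeback only finds entries through
	 * the LRU, which we are not on yet.
	 */
	entry->pool = pool;
	entry->swpentry = page_swap_entry(page);
	entry->objcg = objcg;
	entry->referenced = true;

	/* only from this point on can writeback see the entry */
	zswap_lru_add(&zswap_list_lru, entry);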

>
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index b74c8de996468..eac1f846886a6 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -881,7 +881,8 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> >  	return 0;
> >  }
> >
> > -static bool zswap_compress(struct page *page, struct zswap_entry *entry)
> > +static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > +			   struct zswap_pool *pool)
> >  {
> >  	struct crypto_acomp_ctx *acomp_ctx;
> >  	struct scatterlist input, output;
> > @@ -893,7 +894,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry)
> >  	gfp_t gfp;
> >  	u8 *dst;
> >
> > -	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> > +	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
> >
> >  	mutex_lock(&acomp_ctx->mutex);
> >
> > @@ -926,7 +927,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry)
> >  	if (comp_ret)
> >  		goto unlock;
> >
> > -	zpool = entry->pool->zpool;
> > +	zpool = pool->zpool;
> >  	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> >  	if (zpool_malloc_support_movable(zpool))
> >  		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> > @@ -1435,23 +1436,11 @@ static bool zswap_store_page(struct page *page,
> >  	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(page_folio(page)));
> >  	if (!entry) {
> >  		zswap_reject_kmemcache_fail++;
> > -		goto reject;
> > +		return false;
> >  	}
> >
> > -	/* zswap_store() already holds a ref on 'objcg' and 'pool' */
> > -	if (objcg)
> > -		obj_cgroup_get(objcg);
> > -	zswap_pool_get(pool);
> > -
> > -	/* if entry is successfully added, it keeps the reference */
> > -	entry->pool = pool;
> > -
> > -	if (!zswap_compress(page, entry))
> > -		goto put_pool_objcg;
> > -
> > -	entry->swpentry = page_swap_entry(page);
> > -	entry->objcg = objcg;
> > -	entry->referenced = true;
> > +	if (!zswap_compress(page, entry, pool))
> > +		goto compress_failed;
> >
> >  	old = xa_store(tree, swp_offset(entry->swpentry), entry, GFP_KERNEL);
> >  	if (xa_is_err(old)) {
> > @@ -1470,6 +1459,16 @@ static bool zswap_store_page(struct page *page,
> >  	if (old)
> >  		zswap_entry_free(old);
> >
> > +	/*
> > +	 * The entry is successfully compressed and stored in the tree, there is
> > +	 * no further possibility of failure. Grab refs to the pool and objcg.
> > +	 * These refs will be dropped by zswap_entry_free() when the entry is
> > +	 * removed from the tree.
> > +	 */
> > +	zswap_pool_get(pool);
> > +	if (objcg)
> > +		obj_cgroup_get(objcg);
> > +
> >  	/*
> >  	 * We finish initializing the entry while it's already in xarray.
> >  	 * This is safe because:
> > @@ -1480,26 +1479,22 @@ static bool zswap_store_page(struct page *page,
> >  	 * The publishing order matters to prevent writeback from seeing
> >  	 * an incoherent entry.
> >  	 */

I am referring to the comment here ^

> > +	entry->pool = pool;
> > +	entry->swpentry = page_swap_entry(page);
> > +	entry->objcg = objcg;
> > +	entry->referenced = true;
> >  	if (entry->length) {
> >  		*compressed_bytes += entry->length;
> >  		INIT_LIST_HEAD(&entry->lru);
> >  		zswap_lru_add(&zswap_list_lru, entry);
> >  	}
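
With that, the cleanup at the bottom of the function shrinks as well;
roughly (a sketch, the rest of the diff is not quoted here):

store_failed:
	zpool_free(pool->zpool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return false;

Note that store_failed has to free from pool->zpool rather than
entry->pool->zpool, since entry->pool is only assigned after the
xa_store() succeeds.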