[PATCH 1/2] mm: zswap: increase shrinking protection for zswap swapins only

From: Yosry Ahmed
Date: Tue Mar 19 2024 - 22:09:01 EST


Currently, the number of protected zswap entries for an lruvec is
incremented every time we swap in a page, regardless of whether the
page originated in zswap. Hence, swapins from disk increase the
protection of potentially stale zswap entries. Furthermore, the
increased shrinking protection can cause more pages to skip zswap and
go to disk, eventually leading to even more swapins from disk and a
vicious circle.

Instead, only increase the protection when pages are loaded from zswap.
This also has a nice side effect of removing zswap_folio_swapin() and
replacing it with a static helper that is only called from zswap_load().

No problems were observed in practice; this was found through code
inspection.

Signed-off-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
---
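Note (not part of the commit): below is a self-contained userspace toy
model of the effect described above. It is not kernel code; the lru_size
of 100 and the swapin counts are made up, and it assumes, as a
simplification, that the shrinker simply subtracts the protected count
from what it considers reclaimable. It only illustrates how protection
inflated by disk swapins shields stale entries from shrinking.

/*
 * Userspace toy model (not kernel code) of the per-lruvec protection
 * counter. The numbers and the 1:1 "protected entries are exempt from
 * shrinking" rule are illustrative assumptions only.
 */
#include <stdio.h>

struct lruvec_model {
	long nr_zswap_protected;	/* entries shielded from the shrinker */
	long lru_size;			/* zswap entries on the LRU */
};

/* Analogue of the shrinker asking "how much can I reclaim?" */
static long reclaimable(const struct lruvec_model *l)
{
	long n = l->lru_size - l->nr_zswap_protected;

	return n > 0 ? n : 0;
}

int main(void)
{
	struct lruvec_model l = { .nr_zswap_protected = 0, .lru_size = 100 };

	/* Old behaviour: all 80 swapins bump protection, even disk reads. */
	l.nr_zswap_protected = 80;
	printf("old: reclaimable = %ld\n", reclaimable(&l));	/* 20 */

	/* New behaviour: only the (say) 10 zswap loads bump protection. */
	l.nr_zswap_protected = 10;
	printf("new: reclaimable = %ld\n", reclaimable(&l));	/* 90 */

	return 0;
}
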
 include/linux/zswap.h |  2 --
 mm/swap_state.c       |  8 ++------
 mm/zswap.c            | 10 +++-------
 3 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a85b941db975..1f020b5427e3d 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -34,7 +34,6 @@ int zswap_swapon(int type, unsigned long nr_pages);
 void zswap_swapoff(int type);
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
 void zswap_lruvec_state_init(struct lruvec *lruvec);
-void zswap_folio_swapin(struct folio *folio);
 bool is_zswap_enabled(void);
 #else
 
@@ -58,7 +57,6 @@ static inline int zswap_swapon(int type, unsigned long nr_pages)
 static inline void zswap_swapoff(int type) {}
 static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
 static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
-static inline void zswap_folio_swapin(struct folio *folio) {}
 
 static inline bool is_zswap_enabled(void)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index bfc7e8c58a6d3..32e151054ec47 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -696,10 +696,8 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	/* The page was likely read above, so no need for plugging here */
 	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
-	if (unlikely(page_allocated)) {
-		zswap_folio_swapin(folio);
+	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
-	}
 	return folio;
 }

@@ -872,10 +870,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	/* The folio was likely read above, so no need for plugging here */
 	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
 					&page_allocated, false);
-	if (unlikely(page_allocated)) {
-		zswap_folio_swapin(folio);
+	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
-	}
 	return folio;
 }

diff --git a/mm/zswap.c b/mm/zswap.c
index b31c977f53e9c..323f1dea43d22 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -773,14 +773,9 @@ void zswap_lruvec_state_init(struct lruvec *lruvec)
 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 }
 
-void zswap_folio_swapin(struct folio *folio)
+static void zswap_lruvec_inc_protected(struct lruvec *lruvec)
 {
-	struct lruvec *lruvec;
-
-	if (folio) {
-		lruvec = folio_lruvec(folio);
-		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	}
+	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 }
 
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
@@ -1644,6 +1639,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_free(entry);
 
 	folio_mark_dirty(folio);
+	zswap_lruvec_inc_protected(folio_lruvec(folio));
 
 	return true;
 }
--
2.44.0.291.gc1ea87d7ee-goog