[PATCH v2] mempolicy: alloc_pages_mpol() for NUMA policy without vma: fix
From: Hugh Dickins
Date: Tue Oct 24 2023 - 12:09:59 EST
mm-unstable commit 48a7bd12d57f ("mempolicy: alloc_pages_mpol() for NUMA
policy without vma") ended read_swap_cache_async()'s support for a NULL vma -
okay; but missed the NULL mpol being passed to __read_swap_cache_async()
by zswap_writeback_entry() - oops!
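For reference, after that series the function takes the policy and interleave
index directly - roughly (paraphrased from mm/swap_state.c in mm-unstable at
the time, not part of this patch):

	struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct mempolicy *mpol, pgoff_t ilx,
			bool *new_page_allocated);

so a NULL mpol gets dereferenced when the new swap cache page is allocated -
hence, presumably, the reported oops.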
Since its other callers all pass a good mpol, add get_task_policy(current)
there in mm/zswap.c, to produce the same good-enough behaviour as before
(and task policy, acted on in the current task, does not require its refcount
to be dup'ed).
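For context, get_task_policy() returns either the task's own policy or a
static fallback, neither of which can go away while the current task is using
it synchronously - a rough sketch, paraphrased from mm/mempolicy.c (details
may differ by tree):

	struct mempolicy *get_task_policy(struct task_struct *p)
	{
		struct mempolicy *pol = p->mempolicy;
		int node;

		if (pol)
			return pol;

		node = numa_node_id();
		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/* preferred_node_policy is not initialised early in boot */
			if (pol->mode)
				return pol;
		}

		return &default_policy;
	}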
But if that policy is (quite reasonably) MPOL_INTERLEAVE, then ilx must
be NO_INTERLEAVE_INDEX rather than 0, to provide the same distribution
as before: move that definition from mempolicy.c to mempolicy.h.
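To illustrate why: in the interleave handling added by that series
(approximate sketch of the mm/mempolicy.c logic, not part of this patch), a
real index hashes to one fixed node, while NO_INTERLEAVE_INDEX falls back to
the task's round-robin il_prev - so passing 0 would have steered every zswap
writeback allocation to the same node:

	case MPOL_INTERLEAVE:
		/* Override input node id */
		*nid = (ilx == NO_INTERLEAVE_INDEX) ?
			interleave_nodes(pol) : interleave_nid(pol, ilx);
		break;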
Reported-by: Domenico Cerasuolo <mimmocerasuolo@xxxxxxxxx>
Closes: https://lore.kernel.org/linux-mm/74e34633-6060-f5e3-aee-7040d43f2e93@xxxxxxxxxx/T/#mf08c877d1884fc7867f9e328cdf02257ff3b3ae9
Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Fixes: 48a7bd12d57f ("mempolicy: alloc_pages_mpol() for NUMA policy without vma")
Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
---
v2: fix !CONFIG_NUMA builds by adding a get_task_policy() stub in mempolicy.h
 include/linux/mempolicy.h | 7 +++++++
 mm/mempolicy.c            | 2 --
 mm/zswap.c                | 7 +++++--
 3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 2801d5b0a4e9..931b118336f4 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -17,6 +17,8 @@
 
 struct mm_struct;
 
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
 #ifdef CONFIG_NUMA
 
 /*
@@ -179,6 +181,11 @@ extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
 
 struct mempolicy {};
 
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+	return NULL;
+}
+
 static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	return true;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 898ee2e3c85b..989293180eb6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -114,8 +114,6 @@
 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
 #define MPOL_MF_WRLOCK (MPOL_MF_INTERNAL << 2) /* Write-lock walked vmas */
 
-#define NO_INTERLEAVE_INDEX (-1UL)
-
 static struct kmem_cache *policy_cache;
 static struct kmem_cache *sn_cache;
 
diff --git a/mm/zswap.c b/mm/zswap.c
index 37d2b1cb2ecb..060857adca76 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -24,6 +24,7 @@
 #include <linux/swap.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
+#include <linux/mempolicy.h>
 #include <linux/mempool.h>
 #include <linux/zpool.h>
 #include <crypto/acompress.h>
@@ -1057,6 +1058,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 {
 	swp_entry_t swpentry = entry->swpentry;
 	struct page *page;
+	struct mempolicy *mpol;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct zpool *pool = zswap_find_zpool(entry);
@@ -1075,8 +1077,9 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	}
 
 	/* try to allocate swap cache page */
-	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
-				       &page_was_allocated);
+	mpol = get_task_policy(current);
+	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+				       NO_INTERLEAVE_INDEX, &page_was_allocated);
 	if (!page) {
 		ret = -ENOMEM;
 		goto fail;
--
2.35.3