[PATCH 8/8] mm/zswap: Use local lock to protect per-CPU data
From: Sebastian Andrzej Siewior
Date: Tue May 19 2020 - 16:20:19 EST
From: "Luis Claudio R. Goncalves" <lgoncalv@xxxxxxxxxx>
zswap uses per-CPU compression. The per-CPU data pointer is acquired with
get_cpu_ptr(), which implicitly disables preemption. It then allocates
memory inside the preempt-disabled region, which conflicts with
PREEMPT_RT semantics.
Replace the implicit preemption control with an explicit local lock.
This allows RT kernels to substitute it with a real per-CPU lock, which
serializes the access but keeps the code section preemptible. On non-RT
kernels this maps to preempt_disable() as before, i.e. no functional
change.
[bigeasy: Use local_lock(), additional hunks, patch description]
Cc: Seth Jennings <sjenning@xxxxxxxxxx>
Cc: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Vitaly Wool <vitaly.wool@xxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@xxxxxxxxxx>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
mm/zswap.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
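For reference, the general shape of the conversion, as a minimal sketch
against the DEFINE_LOCAL_LOCK()/local_lock() API used in this series;
my_percpu_buf and my_do_work() are made-up names for illustration:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u8 *, my_percpu_buf);
/* One local lock serializes all users of the per-CPU data. */
static DEFINE_LOCAL_LOCK(my_cpu_lock);

static void my_do_work(void)
{
	u8 *buf;

	/*
	 * Before: buf = get_cpu_var(my_percpu_buf); that disabled
	 * preemption for the whole section.
	 */
	local_lock(my_cpu_lock);
	buf = *this_cpu_ptr(&my_percpu_buf);

	/* ... use buf; on RT the section stays preemptible ... */

	local_unlock(my_cpu_lock);	/* was: put_cpu_var(my_percpu_buf) */
}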
diff --git a/mm/zswap.c b/mm/zswap.c
index fbb782924ccc5..1db2ad941e501 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/locallock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
@@ -388,6 +389,8 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
+/* Used for zswap_dstmem and tfm */
+static DEFINE_LOCAL_LOCK(zswap_cpu_lock);
static int zswap_dstmem_prepare(unsigned int cpu)
{
@@ -919,10 +922,11 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
dlen = PAGE_SIZE;
src = (u8 *)zhdr + sizeof(struct zswap_header);
dst = kmap_atomic(page);
- tfm = *get_cpu_ptr(entry->pool->tfm);
+ local_lock(zswap_cpu_lock);
+ tfm = *this_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length,
dst, &dlen);
- put_cpu_ptr(entry->pool->tfm);
+ local_unlock(zswap_cpu_lock);
kunmap_atomic(dst);
BUG_ON(ret);
BUG_ON(dlen != PAGE_SIZE);
@@ -1074,12 +1078,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
}
/* compress */
- dst = get_cpu_var(zswap_dstmem);
- tfm = *get_cpu_ptr(entry->pool->tfm);
+ local_lock(zswap_cpu_lock);
+ dst = *this_cpu_ptr(&zswap_dstmem);
+ tfm = *this_cpu_ptr(entry->pool->tfm);
src = kmap_atomic(page);
ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
kunmap_atomic(src);
- put_cpu_ptr(entry->pool->tfm);
if (ret) {
ret = -EINVAL;
goto put_dstmem;
@@ -1103,7 +1107,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
memcpy(buf, &zhdr, hlen);
memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle);
- put_cpu_var(zswap_dstmem);
+ local_unlock(zswap_cpu_lock);
/* populate entry */
entry->offset = offset;
@@ -1131,7 +1135,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
return 0;
put_dstmem:
- put_cpu_var(zswap_dstmem);
+ local_unlock(zswap_cpu_lock);
zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
@@ -1176,9 +1180,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
if (zpool_evictable(entry->pool->zpool))
src += sizeof(struct zswap_header);
dst = kmap_atomic(page);
- tfm = *get_cpu_ptr(entry->pool->tfm);
+ local_lock(zswap_cpu_lock);
+ tfm = *this_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
- put_cpu_ptr(entry->pool->tfm);
+ local_unlock(zswap_cpu_lock);
kunmap_atomic(dst);
zpool_unmap_handle(entry->pool->zpool, entry->handle);
BUG_ON(ret);
--
2.26.2