[PATCHv3 3/8] zram: Factor out WB and non-WB zram read functions
From: Sergey Senozhatsky
Date: Sun Oct 09 2022 - 05:07:52 EST
We will use the non-WB variant in the ZRAM page recompression path.
Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
drivers/block/zram/zram_drv.c | 73 ++++++++++++++++++++++++-----------
1 file changed, 50 insertions(+), 23 deletions(-)
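
A rough sketch (not part of this patch) of how a later recompression
path could use the new non-WB helper. The function name
zram_recompress_slot and its exact shape are illustrative only; the
changelog above only says that the recompression path will reuse the
non-WB read variant, zram_read_from_zspool():

	/*
	 * Illustrative sketch only. A recompression path can read the
	 * old object via zram_read_from_zspool() while holding the slot
	 * lock, and skip slots that already live on the writeback device.
	 */
	static int zram_recompress_slot(struct zram *zram, u32 index,
					struct page *page)
	{
		int ret;

		zram_slot_lock(zram, index);
		if (zram_test_flag(zram, index, ZRAM_WB)) {
			/* No zspool object to read for written-back slots */
			zram_slot_unlock(zram, index);
			return 0;
		}
		/* Slot stays locked across the call, as the helper requires */
		ret = zram_read_from_zspool(zram, page, index);
		zram_slot_unlock(zram, index);
		return ret;
	}
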
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a8ef3c0c3dae..94c62d7ea818 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1314,8 +1314,30 @@ static void zram_free_page(struct zram *zram, size_t index)
 		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
-static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
-			    struct bio *bio, bool partial_io)
+/*
+ * Reads a page from the writeback device. The corresponding ZRAM
+ * slot should be unlocked.
+ */
+static int zram_read_from_writeback(struct zram *zram, struct page *page,
+				    u32 index, struct bio *bio,
+				    bool partial_io)
+{
+	struct bio_vec bvec;
+
+	bvec.bv_page = page;
+	bvec.bv_len = PAGE_SIZE;
+	bvec.bv_offset = 0;
+	return read_from_bdev(zram, &bvec,
+			      zram_get_element(zram, index),
+			      bio, partial_io);
+}
+
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * The corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+				 u32 index)
 {
 	struct zcomp_strm *zstrm;
 	unsigned long handle;
@@ -1323,23 +1345,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 	void *src, *dst;
 	int ret;
 
-	zram_slot_lock(zram, index);
-	if (zram_test_flag(zram, index, ZRAM_WB)) {
-		struct bio_vec bvec;
-
-		zram_slot_unlock(zram, index);
-		/* A null bio means rw_page was used, we must fallback to bio */
-		if (!bio)
-			return -EOPNOTSUPP;
-
-		bvec.bv_page = page;
-		bvec.bv_len = PAGE_SIZE;
-		bvec.bv_offset = 0;
-		return read_from_bdev(zram, &bvec,
-				      zram_get_element(zram, index),
-				      bio, partial_io);
-	}
-
 	handle = zram_get_handle(zram, index);
 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
 		unsigned long value;
@@ -1349,7 +1354,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		mem = kmap_atomic(page);
 		zram_fill_page(mem, PAGE_SIZE, value);
 		kunmap_atomic(mem);
-		zram_slot_unlock(zram, index);
 		return 0;
 	}
 
@@ -1371,17 +1375,40 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_ZCOMP]);
 	}
 	zs_unmap_object(zram->mem_pool, handle);
-	zram_slot_unlock(zram, index);
+	return ret;
+}
+
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+			    struct bio *bio, bool partial_io)
+{
+	int ret;
+
+	zram_slot_lock(zram, index);
+	if (!zram_test_flag(zram, index, ZRAM_WB)) {
+		/* Slot should be locked throughout the function call */
+		ret = zram_read_from_zspool(zram, page, index);
+		zram_slot_unlock(zram, index);
+	} else {
+		/* Slot should be unlocked before the function call */
+		zram_slot_unlock(zram, index);
+
+		/* A null bio means rw_page was used, we must fallback to bio */
+		if (!bio)
+			return -EOPNOTSUPP;
+
+		ret = zram_read_from_writeback(zram, page, index, bio,
+					       partial_io);
+	}
 
 	/* Should NEVER happen. Return bio error if it does. */
-	if (WARN_ON(ret))
+	if (WARN_ON(ret < 0))
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 
 	return ret;
 }
 
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-		u32 index, int offset, struct bio *bio)
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
 	struct page *page;
--
2.38.0.rc1.362.ged0d419d3c-goog