Re: [PATCH] zram: factor out partial IO routine

From: Minchan Kim
Date: Thu Mar 30 2017 - 19:34:11 EST


Hi Sergey,

On Thu, Mar 30, 2017 at 01:12:38PM +0900, Sergey Senozhatsky wrote:
> On (03/29/17 16:48), Minchan Kim wrote:
> > For architectures with PAGE_SIZE > 4K, zram has supported partial IO.
> > However, the mixed code for handling normal and partial IO is too
> > messy and error-prone to modify when adding upcoming features, so
> > this patch aims to clean that up by factoring the partial IO routines
> > out into zram_bvec_partial_[read|write], which will be disabled for
> > most 4K page architectures.
> >
> > x86 (4K page architecture):
> > add/remove: 0/1 grow/shrink: 0/1 up/down: 0/-664 (-664)
> > function                       old     new   delta
> > zram_bvec_rw                  2301    2039    -262
> > zram_decompress_page.isra      402       -    -402
> >
> > So, we will save 664 bytes.
> >
> > However, as a side effect, it will increase the binary size on
> > non-4K architectures. Those are not the major case for zram, so I
> > believe the benefit (maintainability, and binary size for most
> > architectures) is bigger.
>
> a bigger side effect is that now we double the number of lines we need
> to change in certain patches and, thus, the amount of work: when we
> add new functionality or fix something in zram_bvec_{write, read}, we
> would also need to touch zram_bvec_partial_{write, read}.

Yes, that is a pain, too. However, I thought it would still be easier,
because the as-is partial IO routine is more error-prone to me. :)
>
> still probably worth it.
>
> Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@xxxxxxxxx>

Thanks for the review.
So I tried to clean it up further to make you happy. :)

How about this?
It's totally untested, and I have no time until Monday next week,
so please review it when you have enough time.
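
The key idea in this version: on 4K page builds, is_partial_io() becomes a
compile-time constant false, so the compiler can prove every partial IO
branch dead and drop it from the binary. Below is a minimal standalone
sketch of that pattern, not the patch itself: it uses userspace stand-ins,
with PAGE_SIZE defined locally purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096  /* stand-in for the kernel's PAGE_SIZE */

#if PAGE_SIZE != 4096
static inline bool is_partial_io(unsigned int bv_len)
{
        return bv_len != PAGE_SIZE;
}
#else
/*
 * Constant false: every "if (is_partial_io(...))" branch becomes
 * provably dead code, so the partial IO paths cost nothing in the
 * generated binary on 4K page builds.
 */
static inline bool is_partial_io(unsigned int bv_len)
{
        return false;
}
#endif

int main(void)
{
        printf("512-byte bvec is %s\n",
               is_partial_io(512) ? "partial" : "full");
        return 0;
}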

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fefdf260503a..68eee48c5a9d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -98,10 +98,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
         meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }

+#if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
         return bvec->bv_len != PAGE_SIZE;
 }
+#else
+static inline bool is_partial_io(struct bio_vec *bvec)
+{
+        return false;
+}
+#endif

 static void zram_revalidate_disk(struct zram *zram)
 {
@@ -191,7 +198,7 @@ static bool page_same_filled(void *ptr, unsigned long *element)
         return true;
 }

-static void handle_same_page(struct bio_vec *bvec, unsigned long element)
+static void __handle_same_page(struct bio_vec *bvec, unsigned long element)
 {
         struct page *page = bvec->bv_page;
         void *user_mem;
@@ -199,8 +206,6 @@ static void handle_same_page(struct bio_vec *bvec, unsigned long element)
         user_mem = kmap_atomic(page);
         zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
         kunmap_atomic(user_mem);
-
-        flush_dcache_page(page);
 }

 static ssize_t initstate_show(struct device *dev,
@@ -504,13 +509,14 @@ static void zram_free_page(struct zram *zram, size_t index)
         zram_set_obj_size(meta, index, 0);
 }

-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
 {
         int ret = 0;
         unsigned char *cmem;
         struct zram_meta *meta = zram->meta;
         unsigned long handle;
         unsigned int size;
+        void *mem;

         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
         handle = meta->table[index].handle;
@@ -518,17 +524,23 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)

         if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+                mem = kmap_atomic(page);
                 zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
+                kunmap_atomic(mem);
                 return 0;
         }

         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
         if (size == PAGE_SIZE) {
+                mem = kmap_atomic(page);
                 copy_page(mem, cmem);
+                kunmap_atomic(mem);
         } else {
                 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

+                mem = kmap_atomic(page);
                 ret = zcomp_decompress(zstrm, cmem, size, mem);
+                kunmap_atomic(mem);
                 zcomp_stream_put(zram->comp);
         }
         zs_unmap_object(meta->mem_pool, handle);
@@ -543,99 +555,70 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
         return 0;
 }

-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-                          u32 index, int offset)
+static bool handle_special_page(struct zram *zram, struct bio_vec *bvec,
+                                u32 index)
 {
-        int ret;
-        struct page *page;
-        unsigned char *user_mem, *uncmem = NULL;
         struct zram_meta *meta = zram->meta;
-        page = bvec->bv_page;

         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
         if (unlikely(!meta->table[index].handle) ||
                         zram_test_flag(meta, index, ZRAM_SAME)) {
                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-                handle_same_page(bvec, meta->table[index].element);
-                return 0;
+                __handle_same_page(bvec, meta->table[index].element);
+                return true;
         }
         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

-        if (is_partial_io(bvec))
-                /* Use a temporary buffer to decompress the page */
-                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+        return false;
+}

-        user_mem = kmap_atomic(page);
-        if (!is_partial_io(bvec))
-                uncmem = user_mem;
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+                          u32 index, int offset)
+{
+        int ret;
+        struct page *page;

-        if (!uncmem) {
-                pr_err("Unable to allocate temp memory\n");
-                ret = -ENOMEM;
-                goto out_cleanup;
+        if (handle_special_page(zram, bvec, index))
+                return 0;
+
+        page = bvec->bv_page;
+        if (is_partial_io(bvec)) {
+                /* Use a temporary buffer to decompress the page */
+                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+                if (!page)
+                        return -ENOMEM;
         }

-        ret = zram_decompress_page(zram, uncmem, index);
-        /* Should NEVER happen. Return bio error if it does. */
+        ret = zram_decompress_page(zram, page, index);
         if (unlikely(ret))
-                goto out_cleanup;
+                goto out;

-        if (is_partial_io(bvec))
-                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-                                bvec->bv_len);
+        if (is_partial_io(bvec)) {
+                void *user_mem = kmap_atomic(bvec->bv_page);
+                void *uncmem = kmap_atomic(page);

-        flush_dcache_page(page);
-        ret = 0;
-out_cleanup:
-        kunmap_atomic(user_mem);
+                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+                                bvec->bv_len);
+                kunmap_atomic(uncmem);
+                kunmap_atomic(user_mem);
+        }
+out:
         if (is_partial_io(bvec))
-                kfree(uncmem);
+                __free_page(page);
+
+        flush_dcache_page(bvec->bv_page);
         return ret;
 }

-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
-                        int offset)
+static bool handle_same_page(struct zram *zram, u32 index, struct page *page)
 {
-        int ret = 0;
-        unsigned int clen;
-        unsigned long handle = 0;
-        struct page *page;
-        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
-        struct zram_meta *meta = zram->meta;
-        struct zcomp_strm *zstrm = NULL;
-        unsigned long alloced_pages;
         unsigned long element;
+        void *user_mem = kmap_atomic(page);

-        page = bvec->bv_page;
-        if (is_partial_io(bvec)) {
-                /*
-                 * This is a partial IO. We need to read the full page
-                 * before to write the changes.
-                 */
-                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
-                if (!uncmem) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                ret = zram_decompress_page(zram, uncmem, index);
-                if (ret)
-                        goto out;
-        }
+        if (page_same_filled(user_mem, &element)) {
+                struct zram_meta *meta = zram->meta;

-compress_again:
-        user_mem = kmap_atomic(page);
-        if (is_partial_io(bvec)) {
-                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
-                                bvec->bv_len);
                 kunmap_atomic(user_mem);
-                user_mem = NULL;
-        } else {
-                uncmem = user_mem;
-        }
-
-        if (page_same_filled(uncmem, &element)) {
-                if (user_mem)
-                        kunmap_atomic(user_mem);
                 /* Free memory associated with this sector now. */
                 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                 zram_free_page(zram, index);
@@ -644,29 +627,35 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                 atomic64_inc(&zram->stats.same_pages);
-                ret = 0;
-                goto out;
+                return true;
         }
+        kunmap_atomic(user_mem);

-        zstrm = zcomp_stream_get(zram->comp);
-        ret = zcomp_compress(zstrm, uncmem, &clen);
-        if (!is_partial_io(bvec)) {
-                kunmap_atomic(user_mem);
-                user_mem = NULL;
-                uncmem = NULL;
-        }
+        return false;
+}
+
+static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
+                        struct page *page, unsigned long *p_handle,
+                        unsigned int *p_clen)
+{
+        int ret;
+        unsigned long handle = 0;
+        unsigned int clen;
+        void *user_mem;
+        struct zram_meta *meta = zram->meta;
+
+compress_again:
+        user_mem = kmap_atomic(page);
+        ret = zcomp_compress(*zstrm, user_mem, &clen);
+        kunmap_atomic(user_mem);

         if (unlikely(ret)) {
                 pr_err("Compression failed! err=%d\n", ret);
-                goto out;
+                return ret;
         }

-        src = zstrm->buffer;
-        if (unlikely(clen > max_zpage_size)) {
+        if (unlikely(clen > max_zpage_size))
                 clen = PAGE_SIZE;
-                if (is_partial_io(bvec))
-                        src = uncmem;
-        }

         /*
          * handle allocation has 2 paths:
@@ -689,20 +678,41 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                         __GFP_MOVABLE);
         if (!handle) {
                 zcomp_stream_put(zram->comp);
-                zstrm = NULL;
-
                 atomic64_inc(&zram->stats.writestall);
-
                 handle = zs_malloc(meta->mem_pool, clen,
                                 GFP_NOIO | __GFP_HIGHMEM |
                                 __GFP_MOVABLE);
+                *zstrm = zcomp_stream_get(zram->comp);
                 if (handle)
                         goto compress_again;
+                return -ENOMEM;
+        }

-                pr_err("Error allocating memory for compressed page: %u, size=%u\n",
-                        index, clen);
-                ret = -ENOMEM;
-                goto out;
+        *p_handle = handle;
+        *p_clen = clen;
+        return 0;
+}
+
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+                                u32 index, int offset)
+{
+        int ret;
+        unsigned int clen;
+        unsigned long handle;
+        void *user_mem, *cmem;
+        struct zcomp_strm *zstrm;
+        unsigned long alloced_pages;
+        struct zram_meta *meta = zram->meta;
+        struct page *page = bvec->bv_page;
+
+        if (handle_same_page(zram, index, page))
+                return 0;
+
+        zstrm = zcomp_stream_get(zram->comp);
+        ret = zram_compress(zram, &zstrm, page, &handle, &clen);
+        if (ret) {
+                zcomp_stream_put(zram->comp);
+                return ret;
         }

         alloced_pages = zs_get_total_pages(meta->mem_pool);
@@ -710,22 +720,21 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,

         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                 zs_free(meta->mem_pool, handle);
-                ret = -ENOMEM;
-                goto out;
+                zcomp_stream_put(zram->comp);
+                return -ENOMEM;
         }

         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

-        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
-                src = kmap_atomic(page);
-                copy_page(cmem, src);
-                kunmap_atomic(src);
+        if (clen == PAGE_SIZE) {
+                user_mem = kmap_atomic(page);
+                copy_page(cmem, user_mem);
+                kunmap_atomic(user_mem);
         } else {
-                memcpy(cmem, src, clen);
+                memcpy(cmem, zstrm->buffer, clen);
         }

         zcomp_stream_put(zram->comp);
-        zstrm = NULL;
         zs_unmap_object(meta->mem_pool, handle);

         /*
@@ -734,7 +743,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
          */
         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
         zram_free_page(zram, index);
-
         meta->table[index].handle = handle;
         zram_set_obj_size(meta, index, clen);
         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -742,11 +750,48 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
         /* Update stats */
         atomic64_add(clen, &zram->stats.compr_data_size);
         atomic64_inc(&zram->stats.pages_stored);
+        return 0;
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+                                u32 index, int offset)
+{
+        int ret = -ENOMEM;
+        struct page *page = NULL;
+        unsigned char *user_mem;
+        struct bio_vec vec;
+
+        vec = *bvec;
+        if (is_partial_io(bvec)) {
+                void *uncmem;
+                /*
+                 * This is a partial IO. We need to read the full page
+                 * before to write the changes.
+                 */
+                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+                if (!page)
+                        return ret;
+
+                ret = zram_decompress_page(zram, page, index);
+                if (ret)
+                        goto out;
+
+                user_mem = kmap_atomic(bvec->bv_page);
+                uncmem = kmap_atomic(page);
+                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
+                                bvec->bv_len);
+                kunmap_atomic(uncmem);
+                kunmap_atomic(user_mem);
+
+                vec.bv_page = page;
+                vec.bv_len = PAGE_SIZE;
+                vec.bv_offset = 0;
+        }
+
+        ret = __zram_bvec_write(zram, &vec, index, offset);
 out:
-        if (zstrm)
-                zcomp_stream_put(zram->comp);
         if (is_partial_io(bvec))
-                kfree(uncmem);
+                __free_page(page);
         return ret;
 }
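
For reference, the partial write path above is a plain read-modify-write
over a bounce page: decompress the whole page, patch in the sub-page
update, then store the whole page again. Here is a hedged userspace
sketch of that shape; read_page() and write_page() are hypothetical
stand-ins for zram_decompress_page() and __zram_bvec_write(), not the
kernel API.

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in "read": copies a whole page out of a backing buffer. */
static int read_page(const unsigned char *store, unsigned char *page)
{
        memcpy(page, store, PAGE_SIZE);
        return 0;
}

/* Stand-in full-page store, in place of compress-and-stash. */
static int write_page(unsigned char *store, const unsigned char *page)
{
        memcpy(store, page, PAGE_SIZE);
        return 0;
}

/*
 * Read-modify-write: bring in the whole page, patch the sub-page
 * range, then write the whole page back, mirroring the partial IO
 * branch of zram_bvec_write() above.
 */
static int partial_write(unsigned char *store, const unsigned char *src,
                         size_t offset, size_t len)
{
        unsigned char *page;
        int ret;

        if (offset + len > PAGE_SIZE)
                return -1;

        page = malloc(PAGE_SIZE);       /* bounce page, like alloc_page() */
        if (!page)
                return -1;

        ret = read_page(store, page);
        if (!ret) {
                memcpy(page + offset, src, len);
                ret = write_page(store, page);
        }

        free(page);
        return ret;
}

The kernel version adds kmap_atomic() around highmem pages and does
compression on the store side, but the control flow is the same.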