On 2020/9/29 16:47, Jaegeuk Kim wrote:
On 09/29, Chao Yu wrote:
On 2020/9/29 16:23, Jaegeuk Kim wrote:
I found a bug in the number of page pointers allocated, related to nr_cpages.
Jaegeuk,
If I didn't miss anything, you mean that nr_cpages could be larger
than nr_rpages, right? The problematic case here is lzo/lzo-rle:
cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
as we can't limit clen there the way we do for lz4/zstd:
cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
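To put rough numbers on it (my own back-of-the-envelope, assuming 4KB pages and a 4-page cluster, i.e. log_cluster_size == 2):

	/* lz4/zstd: clen is capped below rlen, so the compressed output
	 * plus the header never needs more than cluster_size - 1 pages.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;

	/* lzo/lzo-rle: lzo1x_worst_compress(x) is x + x/16 + 64 + 3, so for
	 * rlen = 16KB the worst case is about 17475 bytes; DIV_ROUND_UP() of
	 * that plus the header gives nr_cpages = 5, one more than the
	 * cluster_size-sized array the old page_array_alloc() handed out.
	 */
	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);

so for lzo the loop that fills cc->cpages[] can write one slot past the end of the array, which would explain the corruption mentioned below.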
Yes, I've seen some memory corruption in the lzo test. Here is another patch to fix
the memory leak as well.
Signed-off-by: Jaegeuk Kim <jaegeuk@xxxxxxxxxx>
---
fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index f086ac43ca825..ba2d4897744d8 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -20,22 +20,20 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
-static void *page_array_alloc(struct inode *inode)
+static void *page_array_alloc(struct inode *inode, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;
if (likely(size == sbi->page_array_slab_size))
return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
-static void page_array_free(struct inode *inode, void *pages)
+static void page_array_free(struct inode *inode, void *pages, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;
if (!pages)
return;
@@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;
- cc->rpages = page_array_alloc(cc->inode);
+ cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
- page_array_free(cc->inode, cc->rpages);
+ page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
- unsigned int max_len, nr_cpages;
+ unsigned int max_len, new_nr_cpages;
+ struct page **new_cpages;
int i, ret;
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
- cc->cpages = page_array_alloc(cc->inode);
+ cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
Well, cc->nr_cpages will be set to cc->nr_rpages - 1 for zstd/lz4 cases, so
this will make the cpages allocation fall back to kmalloc, which can cause more
memory use.
Thanks,
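For reference, a quick worked example of the miss (numbers are mine, just to illustrate; assuming 8-byte page pointers and a 4-page cluster, so sbi->page_array_slab_size is 32 bytes):

	/* lz4/zstd: cc->nr_cpages ends up as cluster_size - 1 = 3 */
	size = sizeof(struct page *) * 3;		/* 24 bytes */
	if (likely(size == sbi->page_array_slab_size))	/* 24 != 32: fast path missed */
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);	/* taken for every cpages alloc */

i.e. the per-sb slab only serves cluster_size-sized requests, so with this change every cpages allocation for lz4/zstd goes through kzalloc instead.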
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+ new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ /* Now we're going to cut unnecessary tail pages */
+ new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
+ if (!new_cpages) {
+ ret = -ENOMEM;
+ goto out_vunmap_cbuf;
+ }
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
- (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
+ (new_nr_cpages * PAGE_SIZE) -
+ (cc->clen + COMPRESS_HEADER_SIZE));
vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);
- for (i = nr_cpages; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (i < new_nr_cpages) {
+ new_cpages[i] = cc->cpages[i];
+ continue;
+ }
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
- cc->nr_cpages = nr_cpages;
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = new_cpages;
+ cc->nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
- dic->tpages = page_array_alloc(dic->inode);
+ dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = page_array_alloc(cc->inode);
+ cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc);
return 0;
out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages);
+ page_array_free(cc->inode, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
continue;
f2fs_put_page(cc->cpages[i], 1);
}
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
out_put_cic:
kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
@@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
- page_array_free(cic->inode, cic->rpages);
+ page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}
@@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
- page_array_free(cc->inode, cc->cpages);
- cc->cpages = NULL;
if (!err)
return 0;
f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
- dic->rpages = page_array_alloc(cc->inode);
+ dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
- dic->cpages = page_array_alloc(dic->inode);
+ dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
if (!dic->cpages)
goto out_free;
@@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages);
+ page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages);
+ page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}
- page_array_free(dic->inode, dic->rpages);
+ page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}