[PATCH 5.2 282/313] btrfs: fix allocation of free space cache v1 bitmap pages
From: Greg Kroah-Hartman
Date: Thu Oct 03 2019 - 13:07:23 EST
From: Christophe Leroy <christophe.leroy@xxxxxx>
commit 3acd48507dc43eeeb0a1fe965b8bad91cab904a7 upstream.
Notifications of type "BUG kmalloc-4096 () : Redzone overwritten" have
recently been observed in various parts of the kernel.
After some time, a correlation was established with the use of the BTRFS
filesystem and with SLUB_DEBUG turned on.
[ 22.809700] BUG kmalloc-4096 (Tainted: G W ): Redzone overwritten
[ 22.810286] INFO: 0xbe1a5921-0xfbfc06cd. First byte 0x0 instead of 0xcc
[ 22.810866] INFO: Allocated in __load_free_space_cache+0x588/0x780 [btrfs] age=22 cpu=0 pid=224
[ 22.811193] __slab_alloc.constprop.26+0x44/0x70
[ 22.811345] kmem_cache_alloc_trace+0xf0/0x2ec
[ 22.811588] __load_free_space_cache+0x588/0x780 [btrfs]
[ 22.811848] load_free_space_cache+0xf4/0x1b0 [btrfs]
[ 22.812090] cache_block_group+0x1d0/0x3d0 [btrfs]
[ 22.812321] find_free_extent+0x680/0x12a4 [btrfs]
[ 22.812549] btrfs_reserve_extent+0xec/0x220 [btrfs]
[ 22.812785] btrfs_alloc_tree_block+0x178/0x5f4 [btrfs]
[ 22.813032] __btrfs_cow_block+0x150/0x5d4 [btrfs]
[ 22.813262] btrfs_cow_block+0x194/0x298 [btrfs]
[ 22.813484] commit_cowonly_roots+0x44/0x294 [btrfs]
[ 22.813718] btrfs_commit_transaction+0x63c/0xc0c [btrfs]
[ 22.813973] close_ctree+0xf8/0x2a4 [btrfs]
[ 22.814107] generic_shutdown_super+0x80/0x110
[ 22.814250] kill_anon_super+0x18/0x30
[ 22.814437] btrfs_kill_super+0x18/0x90 [btrfs]
[ 22.814590] INFO: Freed in proc_cgroup_show+0xc0/0x248 age=41 cpu=0 pid=83
[ 22.814841] proc_cgroup_show+0xc0/0x248
[ 22.814967] proc_single_show+0x54/0x98
[ 22.815086] seq_read+0x278/0x45c
[ 22.815190] __vfs_read+0x28/0x17c
[ 22.815289] vfs_read+0xa8/0x14c
[ 22.815381] ksys_read+0x50/0x94
[ 22.815475] ret_from_syscall+0x0/0x38
Commit 69d2480456d1 ("btrfs: use copy_page for copying pages instead of
memcpy") changed the way bitmap blocks are copied. But although bitmaps
have the size of a page, they were allocated with kzalloc().
Most of the time, kzalloc() allocates aligned blocks of memory, so
copy_page() can be used. But when some debug options like SLAB_DEBUG are
activated, kzalloc() may return an unaligned pointer.
On powerpc, memcpy(), copy_page() and other copying functions use the
'dcbz' instruction, which provides an entire zeroed cacheline to avoid a
memory read when the intention is to overwrite a full line. Functions
like memcpy() are written to care about partial cachelines at the start
and end of the destination, but copy_page() assumes it gets pages. As
pages are naturally cache aligned, copy_page() doesn't care about
partial lines. This means that when copy_page() is called with a
misaligned pointer, a few leading bytes are zeroed (in this case,
hitting SLUB's red zone).
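Condensed illustration of the problematic pattern (the exact call site
and the io_ctl->cur source pointer are assumptions on my part; the
allocation matches the hunks below):

	/* bitmap covers PAGE_SIZE bytes, but kzalloc() only guarantees
	 * the default slab alignment, not page alignment, once slab
	 * debugging inserts its red zones */
	e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
	...
	/* copy_page() expects page aligned pointers; on powerpc its dcbz
	 * zeroes whole cachelines, so a misaligned e->bitmap gets bytes
	 * around it zeroed as well */
	copy_page(e->bitmap, io_ctl->cur);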
To fix it, allocate bitmaps through a dedicated kmem_cache instead of
using kzalloc(). The cache pool is created with a PAGE_SIZE alignment
constraint.
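For reference, a minimal sketch of such a cache, mirroring the name and
flags used in the inode.c hunk below:

	btrfs_free_space_bitmap_cachep =
		kmem_cache_create("btrfs_free_space_bitmap",
				  PAGE_SIZE,	/* object size */
				  PAGE_SIZE,	/* alignment   */
				  SLAB_RED_ZONE, NULL);

	/* allocation and free then go through the cache: */
	bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
	...
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap);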
Reported-by: Erhard F. <erhard_f@xxxxxxxxxxx>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204371
Fixes: 69d2480456d1 ("btrfs: use copy_page for copying pages instead of memcpy")
Cc: stable@xxxxxxxxxxxxxxx # 4.19+
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
Reviewed-by: David Sterba <dsterba@xxxxxxxx>
[ rename to btrfs_free_space_bitmap ]
Signed-off-by: David Sterba <dsterba@xxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
fs/btrfs/ctree.h | 1 +
fs/btrfs/free-space-cache.c | 20 +++++++++++++-------
fs/btrfs/inode.c | 8 ++++++++
3 files changed, 22 insertions(+), 7 deletions(-)
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -40,6 +40,7 @@ extern struct kmem_cache *btrfs_trans_ha
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
+extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
struct btrfs_ordered_sum;
struct btrfs_ref;
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -764,7 +764,8 @@ static int __load_free_space_cache(struc
} else {
ASSERT(num_bitmaps);
num_bitmaps--;
- e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+ e->bitmap = kmem_cache_zalloc(
+ btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!e->bitmap) {
kmem_cache_free(
btrfs_free_space_cachep, e);
@@ -1881,7 +1882,7 @@ static void free_bitmap(struct btrfs_fre
struct btrfs_free_space *bitmap_info)
{
unlink_free_space(ctl, bitmap_info);
- kfree(bitmap_info->bitmap);
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
ctl->total_bitmaps--;
ctl->op->recalc_thresholds(ctl);
@@ -2135,7 +2136,8 @@ new_bitmap:
}
/* allocate the bitmap */
- info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+ info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
+ GFP_NOFS);
spin_lock(&ctl->tree_lock);
if (!info->bitmap) {
ret = -ENOMEM;
@@ -2146,7 +2148,9 @@ new_bitmap:
out:
if (info) {
- kfree(info->bitmap);
+ if (info->bitmap)
+ kmem_cache_free(btrfs_free_space_bitmap_cachep,
+ info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, info);
}
@@ -2802,7 +2806,8 @@ out:
if (entry->bytes == 0) {
ctl->free_extents--;
if (entry->bitmap) {
- kfree(entry->bitmap);
+ kmem_cache_free(btrfs_free_space_bitmap_cachep,
+ entry->bitmap);
ctl->total_bitmaps--;
ctl->op->recalc_thresholds(ctl);
}
@@ -3606,7 +3611,7 @@ again:
}
if (!map) {
- map = kzalloc(PAGE_SIZE, GFP_NOFS);
+ map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!map) {
kmem_cache_free(btrfs_free_space_cachep, info);
return -ENOMEM;
@@ -3635,7 +3640,8 @@ again:
if (info)
kmem_cache_free(btrfs_free_space_cachep, info);
- kfree(map);
+ if (map)
+ kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
return 0;
}
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -73,6 +73,7 @@ static struct kmem_cache *btrfs_inode_ca
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
+struct kmem_cache *btrfs_free_space_bitmap_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
@@ -9361,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_trans_handle_cachep);
kmem_cache_destroy(btrfs_path_cachep);
kmem_cache_destroy(btrfs_free_space_cachep);
+ kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
}
int __init btrfs_init_cachep(void)
@@ -9390,6 +9392,12 @@ int __init btrfs_init_cachep(void)
if (!btrfs_free_space_cachep)
goto fail;
+ btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+ PAGE_SIZE, PAGE_SIZE,
+ SLAB_RED_ZONE, NULL);
+ if (!btrfs_free_space_bitmap_cachep)
+ goto fail;
+
return 0;
fail:
btrfs_destroy_cachep();