Re: [PATCH] btrfs: balance: fix null-ptr-deref in usage filters
From: Qu Wenruo
Date: Fri Mar 13 2026 - 17:29:36 EST
On 2026/3/14 00:36, ZhengYuan Huang wrote:
[...]
On a well-formed filesystem these two are kept in 1:1 correspondence.
However, btrfs_read_block_groups() builds the cache from block group
items in the extent tree, not directly from the chunk tree. A corrupted
image can therefore present a chunk item in the chunk tree whose
corresponding block group item is absent from the extent tree; that
chunk's block group is then never inserted into the in-memory cache.
This is unexpected in the first place.
We already have a lot of extra checks regarding chunks/bgs, e.g. btrfs_verify_dev_extents(), which verifies there is no such missing mapping between chunks and their dev extents.
I believe you can also implement such check between chunks and bgs, in another patch of course.
When balance iterates the chunk tree and reaches such an orphaned chunk,
should_balance_chunk() calls chunk_usage_filter() or
chunk_usage_range_filter(), both of which query the block group cache:
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = cache->used; /* cache may be NULL */
btrfs_lookup_block_group() returns NULL silently when no cached entry
covers chunk_offset. Neither filter checks the return value, so the
immediately following dereference of cache->used triggers the crash.
[FIX]
Add a NULL check after btrfs_lookup_block_group() in both
chunk_usage_filter() and chunk_usage_range_filter(). When the lookup
fails, emit a btrfs_err() message identifying the offending bytenr and
return -EUCLEAN to indicate filesystem corruption.
Since both filter functions now have an error return path, change their
return type from bool to int (negative = error, 0 = do not balance,
positive = balance). Update should_balance_chunk() accordingly (bool ->
int, same convention) and add error propagation for both usage filter
branches. Finally, handle the new negative return in __btrfs_balance()
by jumping to the existing error path, which aborts the balance
operation and reports the error to userspace.
After the fix, the same corruption is correctly detected and reported
by the filters, and the null-ptr-deref is no longer triggered.
Fixes: 5ce5b3c0916b ("Btrfs: usage filter")
Fixes: bc3094673f22 ("btrfs: extend balance filter usage to take minimum and maximum")
Cc: stable@xxxxxxxxxxxxxxx # v3.3+
You may not want to specify a version that is already EOL; a plain Cc to stable should be good enough.
Signed-off-by: ZhengYuan Huang <gality369@xxxxxxxxx>
---
I was not sure whether these two bugs should be fixed in a single patch
or split into two. They share the same root cause, are very close to
each other in the code, and both depend on the same change to
should_balance_chunk(), so I kept them in one patch for now. If splitting
them would be preferred, I can respin this patch accordingly.
Considering the two filters were introduced in different patches, one fix for each will make backporting much easier.
But still, both are very old, so backporting may not be that easy for older kernels anyway.
Otherwise the code looks good to me.
Thanks,
Qu
---
fs/btrfs/volumes.c | 48 ++++++++++++++++++++++++++++++++++------------
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2bec544d8ba3..3aa44967c1dd 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3832,8 +3832,8 @@ static bool chunk_profiles_filter(u64 chunk_type, struct btrfs_balance_args *bar
return true;
}
-static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used;
@@ -3842,6 +3842,12 @@ static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_of
bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ if (!cache) {
+ btrfs_err(fs_info,
+ "balance: chunk at bytenr %llu has no corresponding block group",
+ chunk_offset);
+ return -EUCLEAN;
+ }
chunk_used = cache->used;
if (bargs->usage_min == 0)
@@ -3863,14 +3869,20 @@ static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_of
return ret;
}
-static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
- struct btrfs_balance_args *bargs)
+static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+ struct btrfs_balance_args *bargs)
{
struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
bool ret = true;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ if (!cache) {
+ btrfs_err(fs_info,
+ "balance: chunk at bytenr %llu has no corresponding block group",
+ chunk_offset);
+ return -EUCLEAN;
+ }
chunk_used = cache->used;
if (bargs->usage_min == 0)
@@ -3986,8 +3998,8 @@ static bool chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args
return false;
}
-static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
- u64 chunk_offset)
+static int should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk,
+ u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
@@ -4014,12 +4026,20 @@ static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk
}
/* usage filter */
- if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
- chunk_usage_filter(fs_info, chunk_offset, bargs)) {
- return false;
- } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
- chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
- return false;
+ if (bargs->flags & BTRFS_BALANCE_ARGS_USAGE) {
+ int filter_ret = chunk_usage_filter(fs_info, chunk_offset, bargs);
+
+ if (filter_ret < 0)
+ return filter_ret;
+ if (filter_ret)
+ return 0;
+ } else if (bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) {
+ int filter_ret = chunk_usage_range_filter(fs_info, chunk_offset, bargs);
+
+ if (filter_ret < 0)
+ return filter_ret;
+ if (filter_ret)
+ return 0;
}
/* devid filter */
@@ -4172,6 +4192,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
ret = should_balance_chunk(leaf, chunk, found_key.offset);
btrfs_release_path(path);
+ if (ret < 0) {
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ goto error;
+ }
if (!ret) {
mutex_unlock(&fs_info->reclaim_bgs_lock);
goto loop;