[RFC PATCH 2/3] f2fs: reduce zoned LFS memory by sharing SIT valid maps
From: wallentx
Date: Mon Mar 09 2026 - 06:40:27 EST
From: wallentx <william.allentx@xxxxxxxxx>
For zoned devices, F2FS only allows LFS mode. In that configuration SSR
is not used, IPU is not allowed, and discard defaults to section
granularity. Even so, F2FS still allocates per-segment cur/ckpt valid
maps for every segment, including the common cases where a segment is
trivially empty or trivially full.
Reduce that overhead by introducing shared zero/full SIT valid maps for
zoned LFS. Empty inactive segments point at the shared zero map, full
inactive segments point at the shared full map, and only active or
partially valid segments keep private maps. Update SIT rebuild and
runtime updates to move segments between shared and private maps safely,
and retire replaced private maps with RCU.
Also invalidate scanned SIT metadata pages after mount-time rebuild so
META_MAPPING does not retain the full SIT scan, update memory reporting
to reflect the new layout, and reject checkpoint=disable because its
checkpoint-era validity accounting does not fit the collapsed shared-SIT
representation.
On a test system with 43 HM-SMR zoned volumes (~550 TB total),
CONFIG_F2FS_CHECK_FS=y, and this patch applied on top of jaegeuk/f2fs
dev at 5f04e90eedd0, static F2FS memory dropped from 58.91 GiB to
27.70 GiB.
Signed-off-by: wallentx <william.allentx@xxxxxxxxx>
---
fs/f2fs/debug.c | 20 ++-
fs/f2fs/f2fs.h | 6 +
fs/f2fs/segment.c | 338 ++++++++++++++++++++++++++++++++++++++++++----
fs/f2fs/segment.h | 3 +
fs/f2fs/super.c | 4 +
fs/f2fs/sysfs.c | 3 +-
6 files changed, 341 insertions(+), 33 deletions(-)
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index af88db8fdb71..d8bfdac5c1e4 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -319,9 +319,23 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
- si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
- si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
- si->base_mem += SIT_VBLOCK_MAP_SIZE;
+
+ if (f2fs_use_shared_sit_map(sbi)) {
+ /* shared cur/ckpt maps (zero + full bitmaps) */
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * 2;
+ /* Approximate private bitmaps for active logs */
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * NR_CURSEG_TYPE;
+#ifdef CONFIG_F2FS_CHECK_FS
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+#endif
+ if (f2fs_block_unit_discard(sbi))
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ } else {
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += SIT_VBLOCK_MAP_SIZE;
+ }
+
if (__is_large_section(sbi))
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 8942b2a63cfd..11f3601ffd34 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4914,6 +4914,12 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
+/* Share SIT valid maps only for zoned LFS. */
+static inline bool f2fs_use_shared_sit_map(struct f2fs_sb_info *sbi)
+{
+ return f2fs_sb_has_blkzoned(sbi) && f2fs_lfs_mode(sbi);
+}
+
static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
block_t blkaddr)
{
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c9cfc8f17698..0dab6b16ba56 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -31,6 +31,24 @@ static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;
+static struct kmem_cache *sit_bitmap_slab;
+
+struct f2fs_sit_bitmap {
+ struct rcu_head rcu;
+ unsigned char map[SIT_VBLOCK_MAP_SIZE];
+};
+
+static void f2fs_free_sit_bitmap_rcu(struct rcu_head *rcu)
+{
+ struct f2fs_sit_bitmap *b = container_of(rcu, struct f2fs_sit_bitmap, rcu);
+
+ kmem_cache_free(sit_bitmap_slab, b);
+}
+
+static struct f2fs_sit_bitmap *f2fs_sit_bitmap_from_map(void *map)
+{
+ return container_of(map, struct f2fs_sit_bitmap, map);
+}
static unsigned long __reverse_ulong(unsigned char *str)
{
@@ -2444,6 +2462,31 @@ static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_ent
f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
+ if (f2fs_use_shared_sit_map(sbi)) {
+ struct sit_info *sit_i = SIT_I(sbi);
+
+ if (se->cur_valid_map == sit_i->bitmap_full) {
+ struct f2fs_sit_bitmap *b = f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_NOFS, true, sbi);
+ memset(b->map, 0xff, SIT_VBLOCK_MAP_SIZE);
+ rcu_assign_pointer(se->cur_valid_map, b->map);
+ se->ckpt_valid_map = b->map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, b->map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ } else if (se->cur_valid_map == sit_i->bitmap_zero) {
+ /* Should not happen, freeing empty segment */
+ struct f2fs_sit_bitmap *b = f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_NOFS, true, sbi);
+ memset(b->map, 0, SIT_VBLOCK_MAP_SIZE);
+ rcu_assign_pointer(se->cur_valid_map, b->map);
+ se->ckpt_valid_map = b->map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, b->map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ }
+ }
+
for (i = 0; i < del_count; i++) {
exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
@@ -2478,10 +2521,18 @@ static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_ent
f2fs_test_and_clear_bit(offset + i, se->discard_map))
sbi->discard_blks++;
- if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
- se->ckpt_valid_blocks -= 1;
- if (__is_large_section(sbi))
- get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
+ if (se->cur_valid_map != se->ckpt_valid_map) {
+ if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks -= 1;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
+ }
+ } else {
+ if (exist) {
+ se->ckpt_valid_blocks -= 1;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks -= 1;
+ }
}
}
@@ -2499,6 +2550,31 @@ static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry
bool mir_exist;
#endif
+ if (f2fs_use_shared_sit_map(sbi)) {
+ struct sit_info *sit_i = SIT_I(sbi);
+
+ if (se->cur_valid_map == sit_i->bitmap_zero) {
+ struct f2fs_sit_bitmap *b = f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_NOFS, true, sbi);
+ memset(b->map, 0, SIT_VBLOCK_MAP_SIZE);
+ rcu_assign_pointer(se->cur_valid_map, b->map);
+ se->ckpt_valid_map = b->map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, b->map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ } else if (se->cur_valid_map == sit_i->bitmap_full) {
+ /* Should not happen in LFS alloc, but for safety */
+ struct f2fs_sit_bitmap *b = f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_NOFS, true, sbi);
+ memset(b->map, 0xff, SIT_VBLOCK_MAP_SIZE);
+ rcu_assign_pointer(se->cur_valid_map, b->map);
+ se->ckpt_valid_map = b->map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, b->map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ }
+ }
+
exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
mir_exist = f2fs_test_and_set_bit(offset,
@@ -2525,14 +2601,23 @@ static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry
* or newly invalidated.
*/
if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
- if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
- se->ckpt_valid_blocks++;
- if (__is_large_section(sbi))
- get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
+ if (se->cur_valid_map != se->ckpt_valid_map) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) {
+ se->ckpt_valid_blocks++;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
+ }
+ } else {
+ if (!exist) {
+ se->ckpt_valid_blocks++;
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, segno)->ckpt_valid_blocks++;
+ }
}
}
- if (!f2fs_test_bit(offset, se->ckpt_valid_map)) {
+ if (se->cur_valid_map != se->ckpt_valid_map &&
+ !f2fs_test_bit(offset, se->ckpt_valid_map)) {
se->ckpt_valid_blocks += del;
if (__is_large_section(sbi))
get_sec_entry(sbi, segno)->ckpt_valid_blocks += del;
@@ -2582,6 +2667,40 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (__is_large_section(sbi))
get_sec_entry(sbi, segno)->valid_blocks += del;
+
+ if (f2fs_use_shared_sit_map(sbi)) {
+ struct sit_info *sit_i = SIT_I(sbi);
+
+ if (new_vblocks == 0 &&
+ se->cur_valid_map != sit_i->bitmap_zero) {
+ void *old_map = se->cur_valid_map;
+
+ rcu_assign_pointer(se->cur_valid_map, sit_i->bitmap_zero);
+ se->ckpt_valid_map = sit_i->bitmap_zero;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memset(se->cur_valid_map_mir, 0, SIT_VBLOCK_MAP_SIZE);
+#endif
+ if (old_map != sit_i->bitmap_zero &&
+ old_map != sit_i->bitmap_full) {
+ call_rcu(&f2fs_sit_bitmap_from_map(old_map)->rcu,
+ f2fs_free_sit_bitmap_rcu);
+ }
+ } else if (new_vblocks == BLKS_PER_SEG(sbi) &&
+ se->cur_valid_map != sit_i->bitmap_full) {
+ void *old_map = se->cur_valid_map;
+
+ rcu_assign_pointer(se->cur_valid_map, sit_i->bitmap_full);
+ se->ckpt_valid_map = sit_i->bitmap_full;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memset(se->cur_valid_map_mir, 0xff, SIT_VBLOCK_MAP_SIZE);
+#endif
+ if (old_map != sit_i->bitmap_zero &&
+ old_map != sit_i->bitmap_full) {
+ call_rcu(&f2fs_sit_bitmap_from_map(old_map)->rcu,
+ f2fs_free_sit_bitmap_rcu);
+ }
+ }
+ }
}
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
@@ -4812,6 +4931,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
char *src_bitmap, *bitmap;
unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
+ bool share_map = f2fs_use_shared_sit_map(sbi);
/* allocate memory for SIT information */
sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
@@ -4838,28 +4958,73 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
#else
bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
#endif
- sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
- if (!sit_i->bitmap)
- return -ENOMEM;
-
- bitmap = sit_i->bitmap;
- for (start = 0; start < MAIN_SEGS(sbi); start++) {
- rcu_assign_pointer(sit_i->sentries[start].cur_valid_map,
- bitmap);
- bitmap += SIT_VBLOCK_MAP_SIZE;
+ if (share_map) {
+ sit_i->bitmap_zero = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->bitmap_zero)
+ return -ENOMEM;
+ sit_i->bitmap_full = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->bitmap_full) {
+ kfree(sit_i->bitmap_zero);
+ sit_i->bitmap_zero = NULL;
+ return -ENOMEM;
+ }
+ memset(sit_i->bitmap_full, 0xff, SIT_VBLOCK_MAP_SIZE);
- sit_i->sentries[start].ckpt_valid_map = bitmap;
- bitmap += SIT_VBLOCK_MAP_SIZE;
+ bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * discard_map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bitmap_size += MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE;
+#endif
+ if (bitmap_size) {
+ sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+ if (!sit_i->bitmap) {
+ kfree(sit_i->bitmap_full);
+ kfree(sit_i->bitmap_zero);
+ sit_i->bitmap_full = NULL;
+ sit_i->bitmap_zero = NULL;
+ return -ENOMEM;
+ }
+ bitmap = sit_i->bitmap;
+ }
+ for (start = 0; start < MAIN_SEGS(sbi); start++) {
+ rcu_assign_pointer(sit_i->sentries[start].cur_valid_map,
+ sit_i->bitmap_zero);
+ sit_i->sentries[start].ckpt_valid_map =
+ sit_i->bitmap_zero;
#ifdef CONFIG_F2FS_CHECK_FS
- sit_i->sentries[start].cur_valid_map_mir = bitmap;
- bitmap += SIT_VBLOCK_MAP_SIZE;
+ sit_i->sentries[start].cur_valid_map_mir =
+ bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
#endif
+ if (discard_map) {
+ sit_i->sentries[start].discard_map = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
+ }
+ }
+ } else {
+ sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+ if (!sit_i->bitmap)
+ return -ENOMEM;
+
+ bitmap = sit_i->bitmap;
+
+ for (start = 0; start < MAIN_SEGS(sbi); start++) {
+ rcu_assign_pointer(sit_i->sentries[start].cur_valid_map, bitmap);
+ bitmap += SIT_VBLOCK_MAP_SIZE;
- if (discard_map) {
- sit_i->sentries[start].discard_map = bitmap;
+ sit_i->sentries[start].ckpt_valid_map = bitmap;
bitmap += SIT_VBLOCK_MAP_SIZE;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ sit_i->sentries[start].cur_valid_map_mir = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
+#endif
+
+ if (discard_map) {
+ sit_i->sentries[start].discard_map = bitmap;
+ bitmap += SIT_VBLOCK_MAP_SIZE;
+ }
}
}
@@ -5009,7 +5174,39 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
err = check_block_count(sbi, start, &sit);
if (err)
return err;
- seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_use_shared_sit_map(sbi)) {
+ unsigned int vblocks = GET_SIT_VBLOCKS(&sit);
+ unsigned char *map = NULL;
+ bool is_active = is_curseg(sbi, start);
+
+ if (vblocks == 0 && !is_active) {
+ map = sit_i->bitmap_zero;
+ } else if (vblocks == BLKS_PER_SEG(sbi) && !is_active) {
+ map = sit_i->bitmap_full;
+ } else {
+ struct f2fs_sit_bitmap *b =
+ f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_KERNEL,
+ false, sbi);
+ if (!b)
+ return -ENOMEM;
+ map = b->map;
+ memcpy(map, sit.valid_map, SIT_VBLOCK_MAP_SIZE);
+ }
+
+ se->valid_blocks = vblocks;
+ se->ckpt_valid_blocks = vblocks;
+ rcu_assign_pointer(se->cur_valid_map, map);
+ se->ckpt_valid_map = map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ se->type = GET_SIT_TYPE(&sit);
+ se->mtime = le64_to_cpu(sit.mtime);
+ } else {
+ seg_info_from_raw_sit(se, &sit);
+ }
if (se->type >= NR_PERSISTENT_LOG) {
f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
@@ -5039,6 +5236,15 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
get_sec_entry(sbi, start)->valid_blocks +=
se->valid_blocks;
}
+ if (f2fs_use_shared_sit_map(sbi)) {
+ pgoff_t start_addr = sit_i->sit_base_addr + start_blk;
+ pgoff_t end_addr = start_addr + readed - 1;
+ pgoff_t alt_start_addr = start_addr + sit_i->sit_blocks;
+ pgoff_t alt_end_addr = alt_start_addr + readed - 1;
+
+ invalidate_mapping_pages(META_MAPPING(sbi), start_addr, end_addr);
+ invalidate_mapping_pages(META_MAPPING(sbi), alt_start_addr, alt_end_addr);
+ }
start_blk += readed;
} while (start_blk < sit_blk_cnt);
@@ -5065,7 +5271,52 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
err = check_block_count(sbi, start, &sit);
if (err)
break;
- seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_use_shared_sit_map(sbi)) {
+ unsigned int vblocks = GET_SIT_VBLOCKS(&sit);
+ unsigned char *map = se->cur_valid_map;
+ bool is_active = is_curseg(sbi, start);
+
+ if (vblocks == 0 && !is_active) {
+ if (map != sit_i->bitmap_zero &&
+ map != sit_i->bitmap_full)
+ kmem_cache_free(sit_bitmap_slab,
+ f2fs_sit_bitmap_from_map(map));
+ map = sit_i->bitmap_zero;
+ } else if (vblocks == BLKS_PER_SEG(sbi) && !is_active) {
+ if (map != sit_i->bitmap_zero &&
+ map != sit_i->bitmap_full)
+ kmem_cache_free(sit_bitmap_slab,
+ f2fs_sit_bitmap_from_map(map));
+ map = sit_i->bitmap_full;
+ } else {
+ if (map == sit_i->bitmap_zero ||
+ map == sit_i->bitmap_full) {
+ struct f2fs_sit_bitmap *b =
+ f2fs_kmem_cache_alloc(sit_bitmap_slab,
+ GFP_KERNEL,
+ false, sbi);
+ if (!b) {
+ up_read(&curseg->journal_rwsem);
+ return -ENOMEM;
+ }
+ map = b->map;
+ }
+ memcpy(map, sit.valid_map, SIT_VBLOCK_MAP_SIZE);
+ }
+
+ se->valid_blocks = vblocks;
+ se->ckpt_valid_blocks = vblocks;
+ rcu_assign_pointer(se->cur_valid_map, map);
+ se->ckpt_valid_map = map;
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, map, SIT_VBLOCK_MAP_SIZE);
+#endif
+ se->type = GET_SIT_TYPE(&sit);
+ se->mtime = le64_to_cpu(sit.mtime);
+ } else {
+ seg_info_from_raw_sit(se, &sit);
+ }
if (se->type >= NR_PERSISTENT_LOG) {
f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
@@ -5844,8 +6095,30 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
if (!sit_i)
return;
- if (sit_i->sentries)
- kvfree(sit_i->bitmap);
+ if (sit_i->sentries) {
+ if (f2fs_use_shared_sit_map(sbi)) {
+ unsigned int start;
+
+ for (start = 0; start < MAIN_SEGS(sbi); start++) {
+ struct seg_entry *se = &sit_i->sentries[start];
+
+ if (se->cur_valid_map &&
+ se->cur_valid_map != sit_i->bitmap_zero &&
+ se->cur_valid_map != sit_i->bitmap_full) {
+ struct f2fs_sit_bitmap *b;
+
+ b = f2fs_sit_bitmap_from_map(se->cur_valid_map);
+ kmem_cache_free(sit_bitmap_slab, b);
+ }
+ }
+ kfree(sit_i->bitmap_zero);
+ kfree(sit_i->bitmap_full);
+ if (sit_i->bitmap)
+ kvfree(sit_i->bitmap);
+ } else {
+ kvfree(sit_i->bitmap);
+ }
+ }
kfree(sit_i->tmp_map);
kvfree(sit_i->sentries);
@@ -5898,8 +6171,16 @@ int __init f2fs_create_segment_manager_caches(void)
sizeof(struct revoke_entry));
if (!revoke_entry_slab)
goto destroy_sit_entry_set;
+
+ sit_bitmap_slab = f2fs_kmem_cache_create("f2fs_sit_bitmap",
+ sizeof(struct f2fs_sit_bitmap));
+ if (!sit_bitmap_slab)
+ goto destroy_revoke_entry;
+
return 0;
+destroy_revoke_entry:
+ kmem_cache_destroy(revoke_entry_slab);
destroy_sit_entry_set:
kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
@@ -5916,4 +6197,5 @@ void f2fs_destroy_segment_manager_caches(void)
kmem_cache_destroy(discard_cmd_slab);
kmem_cache_destroy(discard_entry_slab);
kmem_cache_destroy(revoke_entry_slab);
+ kmem_cache_destroy(sit_bitmap_slab);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 068845660b0f..cb45cfa7a658 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -233,6 +233,9 @@ struct sit_info {
unsigned long long dirty_max_mtime; /* rerange candidates in GC_AT */
unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
+
+ unsigned char *bitmap_zero; /* shared zero bitmap */
+ unsigned char *bitmap_full; /* shared full bitmap */
};
struct free_segmap_info {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8774c60b4be4..83ce88ce12cb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2617,6 +2617,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_err(sbi, "checkpoint=disable on readonly fs");
return -EINVAL;
}
+ if (f2fs_use_shared_sit_map(sbi)) {
+ f2fs_err(sbi, "checkpoint=disable is not supported in zoned shared SIT mode");
+ return -EOPNOTSUPP;
+ }
sbi->sb->s_flags |= SB_ACTIVE;
/* check if we need more GC first */
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 9c79f7b63583..2baf349721c9 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -1788,8 +1788,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
rcu_read_lock();
- memcpy(map, rcu_dereference(se->cur_valid_map),
- SIT_VBLOCK_MAP_SIZE);
+ memcpy(map, rcu_dereference(se->cur_valid_map), SIT_VBLOCK_MAP_SIZE);
rcu_read_unlock();
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
--
2.53.0