[PATCH 2/2] f2fs: introduce gc_urgent_idle_remaining sysfs node and mark gc_urgent_high_remaining node deprecated
From: Yangtao Li
Date: Fri Oct 21 2022 - 13:47:02 EST
Add a new sysfs node called gc_urgent_idle_remaining.
With this value, the user can set the trial count limit
for the GC urgent and idle modes. If the GC thread
reaches the limit, the mode turns back to GC normal mode.
Unlike gc_urgent_high_remaining, which only applies to
gc_urgent_high, this limit applies to all GC modes. Also
mark gc_urgent_high_remaining as deprecated, so that the
node can be removed in the future.
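
Example usage (a sketch; the device name and the values written
below are only illustrative):

  # switch to an urgent GC mode, then allow at most 16 GC trials
  # before the mode falls back to GC normal mode
  echo 1 > /sys/fs/f2fs/sda1/gc_urgent
  echo 16 > /sys/fs/f2fs/sda1/gc_urgent_idle_remaining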
Signed-off-by: Yangtao Li <frank.li@xxxxxxxx>
---
Documentation/ABI/testing/sysfs-fs-f2fs | 8 ++++++++
fs/f2fs/f2fs.h | 6 ++++--
fs/f2fs/gc.c | 12 ++++++------
fs/f2fs/super.c | 2 +-
fs/f2fs/sysfs.c | 14 ++++++++++----
5 files changed, 29 insertions(+), 13 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 483639fb727b..859c4e53a846 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -598,6 +598,14 @@ Contact: "Daeho Jeong" <daehojeong@xxxxxxxxxx>
Description: You can set the trial count limit for GC urgent high mode with this value.
If GC thread gets to the limit, the mode will turn back to GC normal mode.
By default, the value is zero, which means there is no limit like before.
+ <deprecated>
+
+What: /sys/fs/f2fs/<disk>/gc_urgent_idle_remaining
+Date: October 2022
+Contact: "Yangtao Li" <frank.li@xxxxxxxx>
+Description: You can set the trial count limit for GC urgent and idle mode with this value.
+ If GC thread gets to the limit, the mode will turn back to GC normal mode.
+ By default, the value is zero, which means there is no limit like before.
What: /sys/fs/f2fs/<disk>/max_roll_forward_node_blocks
Date: January 2022
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e6355a5683b7..2bad69cf9fd9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1734,8 +1734,10 @@ struct f2fs_sb_info {
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
- spinlock_t gc_urgent_high_lock;
- unsigned int gc_urgent_high_remaining; /* remaining trial count for GC_URGENT_HIGH */
+ spinlock_t gc_urgent_idle_lock;
+ /* remaining trial count for GC_URGENT_* and GC_IDLE_* */
+ unsigned int gc_urgent_idle_remaining;
+ unsigned int gc_urgent_high_remaining; /* deprecated */
/* for skip statistic */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2f113fbcb85c..9641edc281b3 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -97,14 +97,14 @@ static int gc_thread_func(void *data)
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
*/
- if (sbi->gc_mode == GC_URGENT_HIGH) {
- spin_lock(&sbi->gc_urgent_high_lock);
- if (sbi->gc_urgent_high_remaining) {
- sbi->gc_urgent_high_remaining--;
- if (!sbi->gc_urgent_high_remaining)
+ if (sbi->gc_mode != GC_NORMAL) {
+ spin_lock(&sbi->gc_urgent_idle_lock);
+ if (sbi->gc_urgent_idle_remaining) {
+ sbi->gc_urgent_idle_remaining--;
+ if (!sbi->gc_urgent_idle_remaining)
gc_normal_mode = true;
}
- spin_unlock(&sbi->gc_urgent_high_lock);
+ spin_unlock(&sbi->gc_urgent_idle_lock);
}
if (sbi->gc_mode == GC_URGENT_HIGH ||
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3834ead04620..f90a8c0a53ec 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -3616,7 +3616,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
- spin_lock_init(&sbi->gc_urgent_high_lock);
+ spin_lock_init(&sbi->gc_urgent_idle_lock);
atomic64_set(&sbi->current_atomic_write, 0);
sbi->dir_level = DEF_DIR_LEVEL;
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index df27afd71ef4..2b1c653b37cf 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -531,10 +531,14 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
return count;
}
- if (!strcmp(a->attr.name, "gc_urgent_high_remaining")) {
- spin_lock(&sbi->gc_urgent_high_lock);
- sbi->gc_urgent_high_remaining = t;
- spin_unlock(&sbi->gc_urgent_high_lock);
+ /* deprecated */
+ if (!strcmp(a->attr.name, "gc_urgent_high_remaining"))
+ return -EINVAL;
+
+ if (!strcmp(a->attr.name, "gc_urgent_idle_remaining")) {
+ spin_lock(&sbi->gc_urgent_idle_lock);
+ sbi->gc_urgent_idle_remaining = t;
+ spin_unlock(&sbi->gc_urgent_idle_lock);
return count;
}
@@ -826,6 +830,7 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent_high_remaining, gc_urgent_high_remaining);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent_idle_remaining, gc_urgent_idle_remaining);
F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
@@ -953,6 +958,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(data_io_flag),
ATTR_LIST(node_io_flag),
ATTR_LIST(gc_urgent_high_remaining),
+ ATTR_LIST(gc_urgent_idle_remaining),
ATTR_LIST(ckpt_thread_ioprio),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
--
2.25.1