Re: [PATCH v5 2/4] mm: huge_memory: refactor anon_enabled_store() with change_anon_orders()

From: Wei Yang

Date: Tue Mar 10 2026 - 23:12:49 EST


On Tue, Mar 10, 2026 at 10:57:08AM -0700, Breno Leitao wrote:
>Consolidate the repeated spin_lock/set_bit/clear_bit pattern in
>anon_enabled_store() into a new change_anon_orders() helper that
>loops over an orders[] array, setting the bit for the selected mode
>and clearing the others.
>
>Introduce enum anon_enabled_mode and anon_enabled_mode_strings[]
>for the per-order anon THP setting.
>
>Use sysfs_match_string() with the anon_enabled_mode_strings[] table
>to replace the if/else chain of sysfs_streq() calls.
>
>The helper uses test_and_set_bit()/test_and_clear_bit() to track
>whether the state actually changed, so start_stop_khugepaged() is
>only called when needed. When the mode is unchanged,
>set_recommended_min_free_kbytes() is called directly to preserve
>the watermark recalculation behavior of the original code.
>
>Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
>Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
>---
> mm/huge_memory.c | 84 +++++++++++++++++++++++++++++++++++---------------------
> 1 file changed, 52 insertions(+), 32 deletions(-)
>
>diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>index 8e2746ea74adf..e19dda5aaf195 100644
>--- a/mm/huge_memory.c
>+++ b/mm/huge_memory.c
>@@ -316,6 +316,20 @@ static ssize_t enabled_show(struct kobject *kobj,
> return sysfs_emit(buf, "%s\n", output);
> }
>
>+enum anon_enabled_mode {
>+ ANON_ENABLED_ALWAYS = 0,
>+ ANON_ENABLED_MADVISE = 1,
>+ ANON_ENABLED_INHERIT = 2,
>+ ANON_ENABLED_NEVER = 3,
>+};
>+
>+static const char * const anon_enabled_mode_strings[] = {
>+ [ANON_ENABLED_ALWAYS] = "always",
>+ [ANON_ENABLED_MADVISE] = "madvise",
>+ [ANON_ENABLED_INHERIT] = "inherit",
>+ [ANON_ENABLED_NEVER] = "never",
>+};
>+

Just one trivial thing: maybe keep the enum/string sequence in the same order as the sysfs output?

Currently the output of /sys/kernel/mm/transparent_hugepage/hugepages-xxxkB/enabled is

always inherit madvise [never]

But no strong opinion on this.

> static ssize_t enabled_store(struct kobject *kobj,
> struct kobj_attribute *attr,
> const char *buf, size_t count)
>@@ -515,48 +529,54 @@ static ssize_t anon_enabled_show(struct kobject *kobj,
> return sysfs_emit(buf, "%s\n", output);
> }
>
>+static bool set_anon_enabled_mode(int order, enum anon_enabled_mode mode)
>+{
>+ static unsigned long *enabled_orders[] = {
>+ &huge_anon_orders_always,
>+ &huge_anon_orders_madvise,
>+ &huge_anon_orders_inherit,
>+ };
>+ enum anon_enabled_mode m;
>+ bool changed = false;
>+
>+ spin_lock(&huge_anon_orders_lock);
>+ for (m = 0; m < ARRAY_SIZE(enabled_orders); m++) {
>+ if (m == mode)
>+ changed |= !__test_and_set_bit(order, enabled_orders[m]);
>+ else
>+ changed |= __test_and_clear_bit(order, enabled_orders[m]);
>+ }
>+ spin_unlock(&huge_anon_orders_lock);
>+
>+ return changed;
>+}
>+

Nice cleanup, so

Reviewed-by: Wei Yang <richard.weiyang@xxxxxxxxx>

> static ssize_t anon_enabled_store(struct kobject *kobj,
> struct kobj_attribute *attr,
> const char *buf, size_t count)
> {
> int order = to_thpsize(kobj)->order;
>- ssize_t ret = count;
>+ int mode;
>
>- if (sysfs_streq(buf, "always")) {
>- spin_lock(&huge_anon_orders_lock);
>- clear_bit(order, &huge_anon_orders_inherit);
>- clear_bit(order, &huge_anon_orders_madvise);
>- set_bit(order, &huge_anon_orders_always);
>- spin_unlock(&huge_anon_orders_lock);
>- } else if (sysfs_streq(buf, "inherit")) {
>- spin_lock(&huge_anon_orders_lock);
>- clear_bit(order, &huge_anon_orders_always);
>- clear_bit(order, &huge_anon_orders_madvise);
>- set_bit(order, &huge_anon_orders_inherit);
>- spin_unlock(&huge_anon_orders_lock);
>- } else if (sysfs_streq(buf, "madvise")) {
>- spin_lock(&huge_anon_orders_lock);
>- clear_bit(order, &huge_anon_orders_always);
>- clear_bit(order, &huge_anon_orders_inherit);
>- set_bit(order, &huge_anon_orders_madvise);
>- spin_unlock(&huge_anon_orders_lock);
>- } else if (sysfs_streq(buf, "never")) {
>- spin_lock(&huge_anon_orders_lock);
>- clear_bit(order, &huge_anon_orders_always);
>- clear_bit(order, &huge_anon_orders_inherit);
>- clear_bit(order, &huge_anon_orders_madvise);
>- spin_unlock(&huge_anon_orders_lock);
>- } else
>- ret = -EINVAL;
>+ mode = sysfs_match_string(anon_enabled_mode_strings, buf);
>+ if (mode < 0)
>+ return -EINVAL;
>
>- if (ret > 0) {
>- int err;
>+ if (set_anon_enabled_mode(order, mode)) {
>+ int err = start_stop_khugepaged();
>
>- err = start_stop_khugepaged();
> if (err)
>- ret = err;
>+ return err;
>+ } else {
>+ /*
>+ * Recalculate watermarks even when the mode didn't
>+ * change, as the previous code always called
>+ * start_stop_khugepaged() which does this internally.
>+ */
>+ set_recommended_min_free_kbytes();
> }
>- return ret;
>+
>+ return count;
> }
>
> static struct kobj_attribute anon_enabled_attr =
>
>--
>2.52.0
>

--
Wei Yang
Help you, Help me