[PATCH 1/2] mm: thp: avoid redundant start_stop_khugepaged() calls in anon_enabled_store()

From: Breno Leitao

Date: Wed Mar 04 2026 - 05:28:58 EST


Writing "never" (or any other value) multiple times to
/sys/kernel/mm/transparent_hugepage/hugepages-*/enabled calls
start_stop_khugepaged() each time, even when nothing actually changed.
This causes set_recommended_min_free_kbytes() to run unconditionally,
which is unnecessary and floods the printk buffer with redundant
"min_free_kbytes" messages ("raising min_free_kbytes" or
"min_free_kbytes is not updated", depending on whether the user has
set min_free_kbytes). Example:

# for i in $(seq 100); do
# echo never > /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
# done

# dmesg | grep "min_free_kbytes is not updated" | wc -l
100

Use test_and_set_bit()/test_and_clear_bit() instead of the plain
variants to detect whether any bit actually flipped, and skip the
start_stop_khugepaged() call entirely when the configuration is
unchanged.

With this patch, redoing the same operation becomes a no-op.

Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
---
mm/huge_memory.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8e2746ea74adf..9abfb115e9329 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -520,36 +520,37 @@ static ssize_t anon_enabled_store(struct kobject *kobj,
const char *buf, size_t count)
{
int order = to_thpsize(kobj)->order;
+ bool changed = false;
ssize_t ret = count;

if (sysfs_streq(buf, "always")) {
spin_lock(&huge_anon_orders_lock);
- clear_bit(order, &huge_anon_orders_inherit);
- clear_bit(order, &huge_anon_orders_madvise);
- set_bit(order, &huge_anon_orders_always);
+ changed = test_and_clear_bit(order, &huge_anon_orders_inherit);
+ changed |= test_and_clear_bit(order, &huge_anon_orders_madvise);
+ changed |= !test_and_set_bit(order, &huge_anon_orders_always);
spin_unlock(&huge_anon_orders_lock);
} else if (sysfs_streq(buf, "inherit")) {
spin_lock(&huge_anon_orders_lock);
- clear_bit(order, &huge_anon_orders_always);
- clear_bit(order, &huge_anon_orders_madvise);
- set_bit(order, &huge_anon_orders_inherit);
+ changed = test_and_clear_bit(order, &huge_anon_orders_always);
+ changed |= test_and_clear_bit(order, &huge_anon_orders_madvise);
+ changed |= !test_and_set_bit(order, &huge_anon_orders_inherit);
spin_unlock(&huge_anon_orders_lock);
} else if (sysfs_streq(buf, "madvise")) {
spin_lock(&huge_anon_orders_lock);
- clear_bit(order, &huge_anon_orders_always);
- clear_bit(order, &huge_anon_orders_inherit);
- set_bit(order, &huge_anon_orders_madvise);
+ changed = test_and_clear_bit(order, &huge_anon_orders_always);
+ changed |= test_and_clear_bit(order, &huge_anon_orders_inherit);
+ changed |= !test_and_set_bit(order, &huge_anon_orders_madvise);
spin_unlock(&huge_anon_orders_lock);
} else if (sysfs_streq(buf, "never")) {
spin_lock(&huge_anon_orders_lock);
- clear_bit(order, &huge_anon_orders_always);
- clear_bit(order, &huge_anon_orders_inherit);
- clear_bit(order, &huge_anon_orders_madvise);
+ changed = test_and_clear_bit(order, &huge_anon_orders_always);
+ changed |= test_and_clear_bit(order, &huge_anon_orders_inherit);
+ changed |= test_and_clear_bit(order, &huge_anon_orders_madvise);
spin_unlock(&huge_anon_orders_lock);
} else
ret = -EINVAL;

- if (ret > 0) {
+ if (ret > 0 && changed) {
int err;

err = start_stop_khugepaged();

--
2.47.3