[PATCH v2 24/27] rcu: Rename rcu_momentary_dyntick_idle() into rcu_momentary_eqs()

From: Valentin Schneider
Date: Tue Apr 30 2024 - 05:25:06 EST


The context_tracking.state RCU_DYNTICKS subvariable has been renamed to
RCU_WATCHING; replace "dyntick_idle" with "eqs" (extended quiescent
state) in the function name to drop the dyntick reference.
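
A minimal sketch of the resulting call pattern, taken from the
rcutorture forward-progress loops updated below (the caller must have
interrupts disabled and must not be idle):

	unsigned long flags;

	if (tick_nohz_full_enabled()) {
		local_irq_save(flags);
		rcu_momentary_eqs();	/* was rcu_momentary_dyntick_idle() */
		local_irq_restore(flags);
	}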

Signed-off-by: Valentin Schneider <vschneid@xxxxxxxxxx>
---
include/linux/rcutiny.h      | 2 +-
include/linux/rcutree.h      | 2 +-
kernel/rcu/rcutorture.c      | 4 ++--
kernel/rcu/tree.c            | 4 ++--
kernel/rcu/tree_nocb.h       | 2 +-
kernel/rcu/tree_plugin.h     | 6 +++---
kernel/stop_machine.c        | 2 +-
kernel/trace/trace_osnoise.c | 4 ++--
8 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d9ac7b136aeab..cf2b5a188f783 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -158,7 +158,7 @@ void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 254244202ea97..7dbde2b6f714a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);

void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
+void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index cafe047d046e8..dcd1f47748a2f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2679,7 +2679,7 @@ static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(freed);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
@@ -2829,7 +2829,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b375c6006ed6b..e4783a5d18e0d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -355,7 +355,7 @@ bool rcu_watching_zero_in_eqs(int cpu, int *vp)
*
* The caller must have disabled interrupts and must not be idle.
*/
-notrace void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_eqs(void)
{
int seq;

@@ -365,7 +365,7 @@ notrace void rcu_momentary_dyntick_idle(void)
WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
rcu_preempt_deferred_qs(current);
}
-EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
+EXPORT_SYMBOL_GPL(rcu_momentary_eqs);

/**
* rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3f85577bddd4e..2e0d2ff2ee8df 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -951,7 +951,7 @@ static void nocb_cb_wait(struct rcu_data *rdp)


local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
/*
* Disable BH to provide the expected environment. Also, when
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 340bbefe5f652..903e70ec39fc6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -859,7 +859,7 @@ static void rcu_qs(void)

/*
* Register an urgently needed quiescent state. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
* dyntick-idle quiescent state visible to other CPUs, which will in
* some cases serve for expedited as well as normal grace periods.
* Either way, register a lightweight quiescent state.
@@ -879,7 +879,7 @@ void rcu_all_qs(void)
this_cpu_write(rcu_data.rcu_urgent_qs, false);
if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
rcu_qs();
@@ -899,7 +899,7 @@ void rcu_note_context_switch(bool preempt)
goto out;
this_cpu_write(rcu_data.rcu_urgent_qs, false);
if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
out:
rcu_tasks_qs(current, preempt);
trace_rcu_utilization(TPS("End context switch"));
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index cedb17ba158a9..da821ce258ea7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -251,7 +251,7 @@ static int multi_cpu_stop(void *data)
*/
touch_nmi_watchdog();
}
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
} while (curstate != MULTI_STOP_EXIT);

local_irq_restore(flags);
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index a8e28f9b9271c..a41e8a8212647 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1535,7 +1535,7 @@ static int run_osnoise(void)
* This will eventually cause unwarranted noise as PREEMPT_RCU
* will force preemption as the means of ending the current
* grace period. We avoid this problem by calling
- * rcu_momentary_dyntick_idle(), which performs a zero duration
+ * rcu_momentary_eqs(), which performs a zero duration
* EQS allowing PREEMPT_RCU to end the current grace period.
* This call shouldn't be wrapped inside an RCU critical
* section.
@@ -1547,7 +1547,7 @@ static int run_osnoise(void)
if (!disable_irq)
local_irq_disable();

- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();

if (!disable_irq)
local_irq_enable();
--
2.43.0