Re: [PATCH 17/19] rcu/context-tracking: Use accessor for dynticks counter value

From: Paul E. McKenney
Date: Thu Mar 10 2022 - 15:08:46 EST


On Wed, Mar 02, 2022 at 04:48:08PM +0100, Frederic Weisbecker wrote:
> The dynticks counter value is going to join the context tracking state
> in a single field. Use an accessor for this value to make the transition
> easier for all readers.
>
> Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>

Acked-by: Paul E. McKenney <paulmck@xxxxxxxxxx>

> Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Cc: Neeraj Upadhyay <quic_neeraju@xxxxxxxxxxx>
> Cc: Uladzislau Rezki <uladzislau.rezki@xxxxxxxx>
> Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
> Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
> Cc: Nicolas Saenz Julienne <nsaenz@xxxxxxxxxx>
> Cc: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Cc: Xiongfeng Wang <wangxiongfeng2@xxxxxxxxxx>
> Cc: Yu Liao <liaoyu15@xxxxxxxxxx>
> Cc: Phil Auld <pauld@xxxxxxxxxx>
> Cc: Paul Gortmaker <paul.gortmaker@xxxxxxxxxxxxx>
> Cc: Alex Belits <abelits@xxxxxxxxxxx>
> ---
> include/linux/context_tracking_state.h | 17 +++++++++++++++++
> kernel/context_tracking.c | 10 +++++-----
> kernel/rcu/tree.c | 13 ++++---------
> 3 files changed, 26 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
> index 3da44987dc71..bca0d3e0bd3d 100644
> --- a/include/linux/context_tracking_state.h
> +++ b/include/linux/context_tracking_state.h
> @@ -40,6 +40,23 @@ static __always_inline int __ct_state(void)
> {
> return atomic_read(this_cpu_ptr(&context_tracking.state));
> }
> +
> +static __always_inline int ct_dynticks(void)
> +{
> + return atomic_read(this_cpu_ptr(&context_tracking.dynticks));
> +}
> +
> +static __always_inline int ct_dynticks_cpu(int cpu)
> +{
> + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
> + return atomic_read(&ct->dynticks);
> +}
> +
> +static __always_inline int ct_dynticks_cpu_acquire(int cpu)
> +{
> + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
> + return atomic_read_acquire(&ct->dynticks);
> +}
> #endif
>
> #ifdef CONFIG_CONTEXT_TRACKING_USER
> diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
> index 69db43548768..fe9066fdfaab 100644
> --- a/kernel/context_tracking.c
> +++ b/kernel/context_tracking.c
> @@ -133,7 +133,7 @@ static noinstr void rcu_eqs_enter(bool user)
>
> lockdep_assert_irqs_disabled();
> instrumentation_begin();
> - trace_rcu_dyntick(TPS("Start"), ct->dynticks_nesting, 0, atomic_read(&ct->dynticks));
> + trace_rcu_dyntick(TPS("Start"), ct->dynticks_nesting, 0, ct_dynticks());
> WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
> rcu_preempt_deferred_qs(current);
>
> @@ -178,7 +178,7 @@ noinstr void ct_nmi_exit(void)
> */
> if (ct->dynticks_nmi_nesting != 1) {
> trace_rcu_dyntick(TPS("--="), ct->dynticks_nmi_nesting, ct->dynticks_nmi_nesting - 2,
> - atomic_read(&ct->dynticks));
> + ct_dynticks());
> WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
> ct->dynticks_nmi_nesting - 2);
> instrumentation_end();
> @@ -186,7 +186,7 @@ noinstr void ct_nmi_exit(void)
> }
>
> /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
> - trace_rcu_dyntick(TPS("Startirq"), ct->dynticks_nmi_nesting, 0, atomic_read(&ct->dynticks));
> + trace_rcu_dyntick(TPS("Startirq"), ct->dynticks_nmi_nesting, 0, ct_dynticks());
> WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
>
> // instrumentation for the noinstr rcu_dynticks_eqs_enter()
> @@ -231,7 +231,7 @@ static void noinstr rcu_eqs_exit(bool user)
> // instrumentation for the noinstr rcu_dynticks_eqs_exit()
> instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
>
> - trace_rcu_dyntick(TPS("End"), ct->dynticks_nesting, 1, atomic_read(&ct->dynticks));
> + trace_rcu_dyntick(TPS("End"), ct->dynticks_nesting, 1, ct_dynticks());
> WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
> WRITE_ONCE(ct->dynticks_nesting, 1);
> WARN_ON_ONCE(ct->dynticks_nmi_nesting);
> @@ -292,7 +292,7 @@ noinstr void ct_nmi_enter(void)
>
> trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
> ct->dynticks_nmi_nesting,
> - ct->dynticks_nmi_nesting + incby, atomic_read(&ct->dynticks));
> + ct->dynticks_nmi_nesting + incby, ct_dynticks());
> instrumentation_end();
> WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
> ct->dynticks_nmi_nesting + incby);
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index e55a44ed19b6..90a22dd2189d 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -272,9 +272,7 @@ void rcu_softirq_qs(void)
> */
> static void rcu_dynticks_eqs_online(void)
> {
> - struct context_tracking *ct = this_cpu_ptr(&context_tracking);
> -
> - if (atomic_read(&ct->dynticks) & 0x1)
> + if (ct_dynticks() & 0x1)
> return;
> rcu_dynticks_inc(1);
> }
> @@ -285,10 +283,8 @@ static void rcu_dynticks_eqs_online(void)
> */
> static int rcu_dynticks_snap(int cpu)
> {
> - struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
> -
> smp_mb(); // Fundamental RCU ordering guarantee.
> - return atomic_read_acquire(&ct->dynticks);
> + return ct_dynticks_cpu_acquire(cpu);
> }
>
> /*
> @@ -322,11 +318,10 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
> */
> bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
> {
> - struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
> int snap;
>
> // If not quiescent, force back to earlier extended quiescent state.
> - snap = atomic_read(&ct->dynticks) & ~0x1;
> + snap = ct_dynticks_cpu(cpu) & ~0x1;
>
> smp_rmb(); // Order ->dynticks and *vp reads.
> if (READ_ONCE(*vp))
> @@ -334,7 +329,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
> smp_rmb(); // Order *vp read and ->dynticks re-read.
>
> // If still in the same extended quiescent state, we are good!
> - return snap == atomic_read(&ct->dynticks);
> + return snap == ct_dynticks_cpu(cpu);
> }
>
> /*
> --
> 2.25.1
>