[PATCH v2 2/4] rcu/tree: Add better tracing for dyntick-idle
From: Joel Fernandes (Google)
Date: Sat Mar 28 2020 - 18:18:42 EST
The dyntick-idle traces are a bit confusing. This patch simplifies them
and adds some missing cases, such as distinguishing EQS entry from user
mode versus idle. Following are the changes, with example trace output
shown after the list:
(1) Add a new context field to the trace_rcu_dyntick tracepoint. This
context field can be "USER", "IDLE" or "IRQ".
(2) Replace the "++=" and "--=" strings with "StillNonIdle". This is much
easier on the eyes, and the increments and decrements are already
apparent from the nesting counters being printed anyway.
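For illustration, the per-event portion of the new trace output would look
roughly like the following (a sketch only; the nesting and ->dynticks
values are made up, not taken from a real trace):

  rcu_dyntick: Start USER 1 0 0x2f2        (task entering EQS from usermode)
  rcu_dyntick: End IRQ 0 1 0x2f5           (irq/NMI exiting EQS on an RCU-idle CPU)
  rcu_dyntick: StillNonIdle IRQ 1 3 0x2f5  (nested irq/NMI, CPU stays non-idle)

Assuming CONFIG_RCU_TRACE is enabled, these events can be collected through
the usual trace event interface, for example by enabling
events/rcu/rcu_dyntick under the tracefs mount.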
Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
---
include/trace/events/rcu.h | 29 ++++++++++++++++-------------
kernel/rcu/tree.c | 20 +++++++++++++-------
2 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5e49b06e81044..c9ac71e2afd46 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -435,26 +435,28 @@ TRACE_EVENT_RCU(rcu_fqs,
#endif /* #if defined(CONFIG_TREE_RCU) */
/*
- * Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
- * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
- * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle.
+ * Tracepoint for dyntick-idle entry/exit events. These take two strings
+ * as arguments:
+ * polarity: "Start", "End" or "StillNonIdle" for entering, exiting or
+ * remaining outside of dyntick-idle mode.
+ * context: "USER", "IDLE" or "IRQ".
+ * NMIs nested in IRQs are inferred from dynticks_nmi_nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
- * events use two separate counters, and that the "++=" and "--=" events
- * for irq/NMI will change the counter by two, otherwise by one.
+ * the ->dynticks counter. For nested irq/NMI entry/exit ("StillNonIdle"),
+ * the dynticks_nmi_nesting counter changes by two, otherwise by one.
*/
TRACE_EVENT_RCU(rcu_dyntick,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, const char *context, long oldnesting,
+ long newnesting, int dynticks),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, context, oldnesting, newnesting, dynticks),
TP_STRUCT__entry(
__field(const char *, polarity)
+ __field(const char *, context)
__field(long, oldnesting)
__field(long, newnesting)
__field(int, dynticks)
@@ -462,14 +464,15 @@ TRACE_EVENT_RCU(rcu_dyntick,
TP_fast_assign(
__entry->polarity = polarity;
+ __entry->context = context;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
__entry->dynticks = dynticks;
),
- TP_printk("%s %lx %lx %#3x", __entry->polarity,
- __entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ TP_printk("%s %s %lx %lx %#3x", __entry->polarity,
+ __entry->context, __entry->oldnesting, __entry->newnesting,
+ __entry->dynticks & 0xfff)
);
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a011bebe7d0e0..0e5304bad705a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -523,7 +523,8 @@ static void rcu_eqs_enter(bool user)
}
lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+ trace_rcu_dyntick(TPS("Start"), (user ? TPS("USER") : TPS("IDLE")),
+ rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rdp = this_cpu_ptr(&rcu_data);
do_nocb_deferred_wakeup(rdp);
@@ -596,15 +597,17 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
* leave it in non-RCU-idle state.
*/
if (rdp->dynticks_nmi_nesting != 1) {
- trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
- atomic_read(&rdp->dynticks));
+ trace_rcu_dyntick(TPS("StillNonIdle"), TPS("IRQ"),
+ rdp->dynticks_nmi_nesting,
+ rdp->dynticks_nmi_nesting - 2, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
rdp->dynticks_nmi_nesting - 2);
return;
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
+ trace_rcu_dyntick(TPS("Start"), TPS("IRQ"), rdp->dynticks_nmi_nesting,
+ 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
if (irq)
@@ -691,7 +694,8 @@ static void rcu_eqs_exit(bool user)
rcu_dynticks_task_exit();
rcu_dynticks_eqs_exit();
rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
+ trace_rcu_dyntick(TPS("End"), (user ? TPS("USER") : TPS("IDLE")),
+ rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(rdp->dynticks_nesting, 1);
WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
@@ -783,9 +787,11 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
}
raw_spin_unlock_rcu_node(rdp->mynode);
}
- trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
- rdp->dynticks_nmi_nesting,
+
+ trace_rcu_dyntick(incby == 1 ? TPS("End") : TPS("StillNonIdle"),
+ TPS("IRQ"), rdp->dynticks_nmi_nesting,
rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
rdp->dynticks_nmi_nesting + incby);
barrier();
--
2.26.0.rc2.310.g2932bb562d-goog