[RFC PATCH 2/7] trace: Move the trace recursion context enum to trace.h and reuse it
From: Daniel Bristot de Oliveira
Date: Tue Apr 02 2019 - 16:04:21 EST
Both the trace and the ring buffer code need to identify in which
context the current code is running in order to control recursion.
Move the enum to trace.h and unify its usage.
Signed-off-by: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: "Joel Fernandes (Google)" <joel@xxxxxxxxxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
Cc: Tommaso Cucinotta <tommaso.cucinotta@xxxxxxxxxxxxxxx>
Cc: Romulo Silva de Oliveira <romulo.deoliveira@xxxxxxx>
Cc: Clark Williams <williams@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: x86@xxxxxxxxxx
---
kernel/trace/ring_buffer.c | 25 +++++--------------------
kernel/trace/trace.h | 25 +++++++++++++++++++++----
2 files changed, 26 insertions(+), 24 deletions(-)
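
Note for reviewers (not part of the patch): below is a minimal,
self-contained user-space sketch of the recursion-protection scheme
this shared enum backs. The mask values and the preempt-count-like
"pc" argument are made-up stand-ins for illustration only, not the
kernel's definitions.

	/*
	 * Standalone sketch: map the current context to one of four
	 * TRACE_CTX_* bits and refuse to nest within the same context,
	 * while still allowing NMI/IRQ/softirq to nest over normal.
	 */
	#include <stdio.h>

	enum {
		TRACE_CTX_NMI,
		TRACE_CTX_IRQ,
		TRACE_CTX_SOFTIRQ,
		TRACE_CTX_NORMAL,
		TRACE_CTX_MAX
	};

	/* Illustrative stand-ins for the kernel's preempt_count masks. */
	#define SOFTIRQ_MASK	0x0000ff00
	#define HARDIRQ_MASK	0x000f0000
	#define NMI_MASK	0x00f00000

	/* One bit per context, akin to the ring buffer's per-CPU mask. */
	static unsigned int recursion;

	static int context_bit(unsigned int pc)
	{
		if (pc & NMI_MASK)
			return TRACE_CTX_NMI;
		if (pc & HARDIRQ_MASK)
			return TRACE_CTX_IRQ;
		if (pc & SOFTIRQ_MASK)
			return TRACE_CTX_SOFTIRQ;
		return TRACE_CTX_NORMAL;
	}

	/* Returns 0 on success, 1 if we would recurse in this context. */
	static int recursive_lock(unsigned int pc)
	{
		int bit = context_bit(pc);

		if (recursion & (1u << bit))
			return 1;
		recursion |= 1u << bit;
		return 0;
	}

	static void recursive_unlock(unsigned int pc)
	{
		recursion &= ~(1u << context_bit(pc));
	}

	int main(void)
	{
		/* Normal context: first entry ok, nested entry refused. */
		printf("first:  %d\n", recursive_lock(0));	/* 0 */
		printf("nested: %d\n", recursive_lock(0));	/* 1 */
		recursive_unlock(0);

		/* A hard-IRQ entry uses a different bit, so it is allowed. */
		printf("irq:    %d\n", recursive_lock(HARDIRQ_MASK));	/* 0 */
		recursive_unlock(HARDIRQ_MASK);
		return 0;
	}

The point of sharing TRACE_CTX_* is that the ring buffer and the
tracing core compute the same bit for the same context, so a single
per-context bitmask is enough to detect same-context recursion.
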
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41b6f96e5366..fa8cbad2ca88 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -27,6 +27,8 @@
#include <asm/local.h>
+#include "trace.h"
+
static void update_pages_handler(struct work_struct *work);
/*
@@ -428,23 +430,6 @@ struct rb_event_info {
int add_timestamp;
};
-/*
- * Used for which event context the event is in.
- * NMI = 0
- * IRQ = 1
- * SOFTIRQ = 2
- * NORMAL = 3
- *
- * See trace_recursive_lock() comment below for more details.
- */
-enum {
- RB_CTX_NMI,
- RB_CTX_IRQ,
- RB_CTX_SOFTIRQ,
- RB_CTX_NORMAL,
- RB_CTX_MAX
-};
-
/*
* head_page == tail_page && head == tail then buffer is empty.
*/
@@ -2704,10 +2689,10 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
int bit;
if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
- bit = RB_CTX_NORMAL;
+ bit = TRACE_CTX_NORMAL;
else
- bit = pc & NMI_MASK ? RB_CTX_NMI :
- pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+ bit = pc & NMI_MASK ? TRACE_CTX_NMI :
+ pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
return 1;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d80cee49e0eb..dad2f0cd7208 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -616,20 +616,37 @@ enum {
#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
+/*
+ * Used for which event context the event is in.
+ * NMI = 0
+ * IRQ = 1
+ * SOFTIRQ = 2
+ * NORMAL = 3
+ *
+ * See trace_recursive_lock() comment for more details.
+ */
+enum {
+ TRACE_CTX_NMI,
+ TRACE_CTX_IRQ,
+ TRACE_CTX_SOFTIRQ,
+ TRACE_CTX_NORMAL,
+ TRACE_CTX_MAX
+};
+
static __always_inline int trace_get_context_bit(void)
{
int bit;
if (in_interrupt()) {
if (in_nmi())
- bit = 0;
+ bit = TRACE_CTX_NMI;
else if (in_irq())
- bit = 1;
+ bit = TRACE_CTX_IRQ;
else
- bit = 2;
+ bit = TRACE_CTX_SOFTIRQ;
} else
- bit = 3;
+ bit = TRACE_CTX_NORMAL;
return bit;
}
--
2.20.1