[patch V3 24/29] tracing: Remove the last struct stack_trace usage

From: Thomas Gleixner
Date: Thu Apr 25 2019 - 06:00:52 EST


Simplify the stack retrieval code by using the storage-array-based
interface.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>
---
kernel/trace/trace_stack.c | 37 ++++++++++++++++---------------------
1 file changed, 16 insertions(+), 21 deletions(-)

--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -23,11 +23,7 @@
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

-struct stack_trace stack_trace_max = {
- .max_entries = STACK_TRACE_ENTRIES,
- .entries = &stack_dump_trace[0],
-};
-
+static unsigned int stack_trace_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -44,10 +40,10 @@ static void print_max_stack(void)

pr_emerg(" Depth Size Location (%d entries)\n"
" ----- ---- --------\n",
- stack_trace_max.nr_entries);
+ stack_trace_entries);

- for (i = 0; i < stack_trace_max.nr_entries; i++) {
- if (i + 1 == stack_trace_max.nr_entries)
+ for (i = 0; i < stack_trace_entries; i++) {
+ if (i + 1 == stack_trace_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -93,13 +89,12 @@ static void check_stack(unsigned long ip

stack_trace_max_size = this_size;

- stack_trace_max.nr_entries = 0;
- stack_trace_max.skip = 0;
-
- save_stack_trace(&stack_trace_max);
+ stack_trace_entries = stack_trace_save(stack_dump_trace,
+ ARRAY_SIZE(stack_dump_trace) - 1,
+ 0);

/* Skip over the overhead of the stack tracer itself */
- for (i = 0; i < stack_trace_max.nr_entries; i++) {
+ for (i = 0; i < stack_trace_entries; i++) {
if (stack_dump_trace[i] == ip)
break;
}
@@ -108,7 +103,7 @@ static void check_stack(unsigned long ip
* Some archs may not have the passed in ip in the dump.
* If that happens, we need to show everything.
*/
- if (i == stack_trace_max.nr_entries)
+ if (i == stack_trace_entries)
i = 0;

/*
@@ -126,13 +121,13 @@ static void check_stack(unsigned long ip
* loop will only happen once. This code only takes place
* on a new max, so it is far from a fast path.
*/
- while (i < stack_trace_max.nr_entries) {
+ while (i < stack_trace_entries) {
int found = 0;

stack_trace_index[x] = this_size;
p = start;

- for (; p < top && i < stack_trace_max.nr_entries; p++) {
+ for (; p < top && i < stack_trace_entries; p++) {
/*
* The READ_ONCE_NOCHECK is used to let KASAN know that
* this is not a stack-out-of-bounds error.
@@ -163,7 +158,7 @@ static void check_stack(unsigned long ip
i++;
}

- stack_trace_max.nr_entries = x;
+ stack_trace_entries = x;

if (task_stack_end_corrupted(current)) {
print_max_stack();
@@ -265,7 +260,7 @@ static void *
{
long n = *pos - 1;

- if (n >= stack_trace_max.nr_entries)
+ if (n >= stack_trace_entries)
return NULL;

m->private = (void *)n;
@@ -329,7 +324,7 @@ static int t_show(struct seq_file *m, vo
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
- stack_trace_max.nr_entries);
+ stack_trace_entries);

if (!stack_tracer_enabled && !stack_trace_max_size)
print_disabled(m);
@@ -339,10 +334,10 @@ static int t_show(struct seq_file *m, vo

i = *(long *)v;

- if (i >= stack_trace_max.nr_entries)
+ if (i >= stack_trace_entries)
return 0;

- if (i + 1 == stack_trace_max.nr_entries)
+ if (i + 1 == stack_trace_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];