[PATCH v8 08/18] perf ftrace: add support for tracing option 'func_stack_trace'

From: Changbin Du
Date: Fri Aug 07 2020 - 22:39:21 EST


This adds support for displaying the call trace for the function tracer. To
do this, just specify the '--func-opts call-graph' option.

$ sudo perf ftrace -T vfs_read --func-opts call-graph
iio-sensor-prox-855 [003] 6168.369657: vfs_read <-ksys_read
iio-sensor-prox-855 [003] 6168.369677: <stack trace>
=> vfs_read
=> ksys_read
=> __x64_sys_read
=> do_syscall_64
=> entry_SYSCALL_64_after_hwframe
...
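
For reference, the new option only drives existing tracefs knobs. A rough
manual equivalent, shown for illustration (run as root; the
/sys/kernel/tracing mount point is an assumption, older setups expose the
same files under /sys/kernel/debug/tracing):

  # cd /sys/kernel/tracing
  # echo vfs_read > set_ftrace_filter
  # echo function > current_tracer
  # echo 1 > options/func_stack_trace
  # cat trace_pipe

perf ftrace automates this setup and, unlike the manual sequence above,
writes func_stack_trace back to 0 when tracing ends (see
reset_tracing_options() in the diff below).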

Signed-off-by: Changbin Du <changbin.du@xxxxxxxxx>

---
v3: switch to uniform option --func-opts.
v2: option name '-s' -> '--func-call-graph'
---
tools/perf/Documentation/perf-ftrace.txt | 4 +++
tools/perf/builtin-ftrace.c | 42 ++++++++++++++++++++++++
2 files changed, 46 insertions(+)

diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index c46d0a09b38c..8f08ad0992c2 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -76,6 +76,10 @@ OPTIONS
 	specify multiple functions (or glob patterns). It will be
 	passed to 'set_ftrace_notrace' in tracefs.
 
+--func-opts::
+	List of options that can be set:
+	  call-graph - Display kernel stack trace for the function tracer.
+
 -G::
 --graph-funcs=::
 	Select function_graph tracer and set graph filter on the given
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 07b81d0c1658..469b89748c42 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -27,6 +27,7 @@
 #include "util/cap.h"
 #include "util/config.h"
 #include "util/units.h"
+#include "util/parse-sublevel-options.h"
 
 #define DEFAULT_TRACER "function_graph"
 
@@ -42,6 +43,7 @@ struct perf_ftrace {
 	int			graph_depth;
 	unsigned long		percpu_buffer_size;
 	bool			inherit;
+	int			func_stack_trace;
 };
 
 struct filter_entry {
@@ -202,6 +204,7 @@ static void reset_tracing_filters(void);
 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
 {
 	write_tracing_option_file("function-fork", "0");
+	write_tracing_option_file("func_stack_trace", "0");
 }
 
 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
@@ -278,6 +281,17 @@ static int set_tracing_cpu(struct perf_ftrace *ftrace)
 	return set_tracing_cpumask(cpumap);
 }
 
+static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
+{
+	if (!ftrace->func_stack_trace)
+		return 0;
+
+	if (write_tracing_option_file("func_stack_trace", "1") < 0)
+		return -1;
+
+	return 0;
+}
+
 static int reset_tracing_cpu(void)
 {
 	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
@@ -426,6 +440,11 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
 		goto out_reset;
 	}
 
+	if (set_tracing_func_stack_trace(ftrace) < 0) {
+		pr_err("failed to set tracing option func_stack_trace\n");
+		goto out_reset;
+	}
+
 	if (set_tracing_filters(ftrace) < 0) {
 		pr_err("failed to set tracing filters\n");
 		goto out_reset;
@@ -598,6 +617,26 @@ static int parse_buffer_size(const struct option *opt,
 	return -1;
 }
 
+static int parse_func_tracer_opts(const struct option *opt,
+				  const char *str, int unset)
+{
+	int ret;
+	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
+	struct sublevel_option func_tracer_opts[] = {
+		{ .name = "call-graph", .value_ptr = &ftrace->func_stack_trace },
+		{ .name = NULL, }
+	};
+
+	if (unset)
+		return 0;
+
+	ret = perf_parse_sublevel_options(str, func_tracer_opts);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static void select_tracer(struct perf_ftrace *ftrace)
 {
 	bool graph = !list_empty(&ftrace->graph_funcs) ||
@@ -645,6 +684,9 @@ int cmd_ftrace(int argc, const char **argv)
 		     parse_filter_func),
 	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
 		     "do not trace given functions", parse_filter_func),
+	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
+		     "function tracer options, available options: call-graph",
+		     parse_func_tracer_opts),
 	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
 		     "trace given functions using function_graph tracer",
 		     parse_filter_func),
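
As background on the new --func-opts parsing: the sublevel-option helper
boils down to splitting a comma-separated string and flagging each
recognized name. Below is a minimal, standalone sketch of that pattern with
made-up names, handling boolean sub-options only; the real helper is
perf_parse_sublevel_options() from util/parse-sublevel-options.h and is not
reproduced here.

#define _GNU_SOURCE	/* for strtok_r() */
#include <stdio.h>
#include <string.h>

struct sub_opt {
	const char *name;	/* sub-option name, e.g. "call-graph" */
	int *value_ptr;		/* set to 1 when the name is present */
};

/* Split 'str' on commas and enable every recognized sub-option. */
static int parse_sub_opts(char *str, const struct sub_opt *opts)
{
	char *tok, *saveptr = NULL;

	for (tok = strtok_r(str, ",", &saveptr); tok;
	     tok = strtok_r(NULL, ",", &saveptr)) {
		const struct sub_opt *o;
		int found = 0;

		for (o = opts; o->name; o++) {
			if (!strcmp(tok, o->name)) {
				*o->value_ptr = 1;
				found = 1;
				break;
			}
		}
		if (!found) {
			fprintf(stderr, "unknown sub-option: %s\n", tok);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	int func_stack_trace = 0;
	const struct sub_opt opts[] = {
		{ "call-graph", &func_stack_trace },
		{ NULL, NULL },
	};
	char input[] = "call-graph";	/* what --func-opts would receive */

	if (parse_sub_opts(input, opts) == 0)
		printf("func_stack_trace = %d\n", func_stack_trace);
	return 0;
}

With the input "call-graph" this sets func_stack_trace to 1, mirroring what
parse_func_tracer_opts() above does for struct perf_ftrace.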
--
2.25.1