[PATCH v4 5/7] tracing: Add 'hist' event trigger command

From: Tom Zanussi
Date: Fri Apr 10 2015 - 12:06:31 EST


'hist' triggers allow users to continually aggregate trace events,
which can then be viewed afterwards by simply reading a 'hist' file
containing the aggregation in a human-readable format.

The basic idea is very simple and boils down to a mechanism whereby
trace events, rather than being exhaustively dumped in raw form and
viewed directly, are automatically 'compressed' into meaningful tables
completely defined by the user.

This is done strictly via single-line command-line commands and
without the aid of any kind of programming language or interpreter.

A surprising number of typical use cases can be accomplished by users
via this simple mechanism. In fact, a large number of the tasks that
users typically do using the more complicated script-based tracing
tools, at least during the initial stages of an investigation, can be
accomplished by simply specifying a set of keys and values to be used
in the creation of a hash table.

The Linux kernel trace event subsystem happens to provide an extensive
list of keys and values ready-made for such a purpose in the form of
the event format files associated with each trace event. By simply
consulting the format file for field names of interest and by plugging
them into the hist trigger command, users can create an endless number
of useful aggregations to help with investigating various properties
of the system. See Documentation/trace/events.txt for examples.

hist triggers are implemented on top of the existing event trigger
infrastructure, and as such are consistent with the existing triggers
from a user's perspective as well.

The basic syntax follows the existing trigger syntax. Users start an
aggregation by writing a 'hist' trigger to the event of interest's
trigger file:

# echo hist:keys=xxx:values=yyy [if filter] > event/trigger

Once a hist trigger has been set up, by default it continually
aggregates every matching event into a hash table using the event key
and value fields specified.

To view the aggregation at any point in time, simply read the 'hist'
file in the same directory as the 'trigger' file:

# cat event/hist

The detailed syntax provides additional options for user control, and
is described exhaustively in Documentation/trace/events.txt and in the
virtual tracing/README file in the tracing subsystem.

Signed-off-by: Tom Zanussi <tom.zanussi@xxxxxxxxxxxxxxx>
---
include/linux/ftrace_event.h | 1 +
kernel/trace/Kconfig | 14 +
kernel/trace/Makefile | 1 +
kernel/trace/trace.c | 43 +
kernel/trace/trace.h | 21 +
kernel/trace/trace_events.c | 4 +
kernel/trace/trace_events_hist.c | 1593 +++++++++++++++++++++++++++++++++++
kernel/trace/trace_events_trigger.c | 29 +-
8 files changed, 1691 insertions(+), 15 deletions(-)
create mode 100644 kernel/trace/trace_events_hist.c

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b03dfa8..d1fa0b5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -416,6 +416,7 @@ enum event_trigger_type {
ETT_SNAPSHOT = (1 << 1),
ETT_STACKTRACE = (1 << 2),
ETT_EVENT_ENABLE = (1 << 3),
+ ETT_EVENT_HIST = (1 << 4),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a5da09c..002a9ff 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -520,6 +520,20 @@ config MMIOTRACE
See Documentation/trace/mmiotrace.txt.
If you are not helping to develop drivers, say N.

+config HIST_TRIGGERS
+ bool "Histogram triggers"
+ depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
+ help
+ Hist triggers allow one or more arbitrary trace event fields
+ to be aggregated into hash tables and dumped to stdout by
+ reading a debugfs/tracefs file. They're useful for
+ gathering quick and dirty (though precise) summaries of
+ event activity as an initial guide for further investigation
+ using more advanced tools.
+
+ See Documentation/trace/events.txt.
+ If in doubt, say N.
+
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 98f2658..842ddbd 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_PM),y)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bcfa2ad..a08f904 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3730,6 +3730,9 @@ static const char readme_msg[] =
#ifdef CONFIG_TRACER_SNAPSHOT
"\t\t snapshot\n"
#endif
+#ifdef CONFIG_HIST_TRIGGERS
+ "\t\t hist (see below)\n"
+#endif
"\t example: echo traceoff > events/block/block_unplug/trigger\n"
"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
@@ -3745,6 +3748,46 @@ static const char readme_msg[] =
"\t To remove a trigger with a count:\n"
"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
"\t Filters can be ignored when removing a trigger.\n"
+#ifdef CONFIG_HIST_TRIGGERS
+ " hist trigger\t- If set, event hits are aggregated into a hash table\n"
+ "\t Format: hist:keys=<field1>:values=<field1[,field2,...]>\n"
+ "\t [:size=#entries][:sort=field1][:pause][:continue]\n"
+ "\t [:clear] [if <filter>]\n\n"
+ "\t When a matching event is hit, an entry is added to a hash\n"
+ "\t table using the key(s) and value(s) named. Keys and values\n"
+ "\t correspond to fields in the event's format description.\n"
+ "\t Values must correspond to numeric fields - on an event hit,\n"
+ "\t the value(s) will be added to a sum kept for that field.\n"
+ "\t The special string 'hitcount' can be used in place of an\n"
+ "\t explicit value field - this is simply a count of event hits.\n"
+ "\t Keys can be any field, or the special string 'stacktrace',\n"
+ "\t which will use the event's kernel stacktrace as the key.\n\n"
+ "\t Reading the 'hist' file for the event will dump the hash\n"
+ "\t table in its entirety to stdout. By default, numeric fields\n"
+ "\t are displayed as base-10 integers. This can be modified by\n"
+ "\t appending any of the following modifiers to the field name:\n\n"
+ "\t .hex display a number as a hex value\n"
+ "\t .sym display an address as a symbol\n"
+ "\t .syscall display a syscall id as a system call name\n"
+ "\t .execname display a common_pid as a program name\n\n"
+ "\t By default, the size of the hash table is 2048 entries. The\n"
+ "\t 'size' param can be used to specify more or fewer than that.\n"
+ "\t The units are in terms of hashtable entries - if a run uses\n"
+ "\t more entries than specified, the results will show the number\n"
+ "\t of 'drops', the number of hits that were ignored. The size\n"
+ "\t should be a power of 2 between 128 and 131072 (any non-\n"
+ "\t power-of-2 number specified will be rounded up).\n\n"
+ "\t The 'sort' param can be used to specify a value field to sort\n"
+ "\t on. The default if unspecified is 'hitcount' and the\n"
+ "\t default sort order is 'ascending'. To sort in the opposite\n"
+ "\t direction, append '.descending' to the sort key.\n\n"
+ "\t The 'pause' param can be used to pause an existing hist\n"
+ "\t trigger or to start a hist trigger but not log any events\n"
+ "\t until told to do so. 'continue' can be used to start or\n"
+ "\t restart a paused hist trigger.\n\n"
+ "\t The 'clear' param will clear the contents of a running hist\n"
+ "\t trigger and leave its current paused/active state unchanged.\n\n"
+#endif
;

static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5bc1752..1ae4d90 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1098,6 +1098,13 @@ extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
+extern const struct file_operations event_hist_fops;
+
+#ifdef CONFIG_HIST_TRIGGERS
+extern int register_trigger_hist_cmd(void);
+#else
+static inline int register_trigger_hist_cmd(void) { return 0; }
+#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);
@@ -1114,6 +1121,20 @@ struct event_trigger_data {
struct list_head list;
};

+extern void trigger_data_free(struct event_trigger_data *data);
+extern int event_trigger_init(struct event_trigger_ops *ops,
+ struct event_trigger_data *data);
+extern int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
+ int trigger_enable);
+extern void update_cond_flag(struct ftrace_event_file *file);
+extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *test,
+ struct ftrace_event_file *file);
+extern int set_trigger_filter(char *filter_str,
+ struct event_trigger_data *trigger_data,
+ struct ftrace_event_file *file);
+extern int register_event_command(struct event_command *cmd);
+
/**
* struct event_trigger_ops - callbacks for trace event triggers
*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 0d2e473..495ace7 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1621,6 +1621,10 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
trace_create_file("trigger", 0644, file->dir, file,
&event_trigger_fops);

+#ifdef CONFIG_HIST_TRIGGERS
+ trace_create_file("hist", 0444, file->dir, file,
+ &event_hist_fops);
+#endif
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);

diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
new file mode 100644
index 0000000..6fe3400
--- /dev/null
+++ b/kernel/trace/trace_events_hist.c
@@ -0,0 +1,1593 @@
+/*
+ * trace_events_hist - trace event hist triggers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 Tom Zanussi <tom.zanussi@xxxxxxxxxxxxxxx>
+ *
+ * tracing_map implementation inspired by lock-free map algorithms
+ * originated by Dr. Cliff Click:
+ *
+ * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable
+ * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/jhash.h>
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+
+#include "trace.h"
+
+struct hist_field;
+
+typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
+
+struct hist_field {
+ struct ftrace_event_field *field;
+ unsigned long flags;
+ hist_field_fn_t fn;
+};
+
+static u64 hist_field_none(struct hist_field *field, void *event)
+{
+ return 0;
+}
+
+static u64 hist_field_string(struct hist_field *hist_field, void *event)
+{
+ char *addr = (char *)(event + hist_field->field->offset);
+
+ return (u64)addr;
+}
+
+#define DEFINE_HIST_FIELD_FN(type) \
+static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
+{ \
+ type *addr = (type *)(event + hist_field->field->offset); \
+ \
+ return (u64)*addr; \
+}
+
+DEFINE_HIST_FIELD_FN(s64);
+DEFINE_HIST_FIELD_FN(u64);
+DEFINE_HIST_FIELD_FN(s32);
+DEFINE_HIST_FIELD_FN(u32);
+DEFINE_HIST_FIELD_FN(s16);
+DEFINE_HIST_FIELD_FN(u16);
+DEFINE_HIST_FIELD_FN(s8);
+DEFINE_HIST_FIELD_FN(u8);
+
+#define HIST_TRIGGER_BITS 11
+#define HIST_TRIGGER_BITS_MAX 17
+#define HIST_TRIGGER_BITS_MIN 7
+#define HIST_VALS_MAX 2
+#define HIST_SORT_KEYS_MAX 2
+
+#define HIST_KEY_STRING_MAX 64
+
+enum hist_field_flags {
+ HIST_FIELD_SYM = 1,
+ HIST_FIELD_HEX = 2,
+ HIST_FIELD_STACKTRACE = 4,
+ HIST_FIELD_STRING = 8,
+ HIST_FIELD_EXECNAME = 16,
+ HIST_FIELD_SYSCALL = 32,
+};
+
+struct hist_trigger_sort_key {
+ bool use_hitcount;
+ bool use_key;
+ bool descending;
+ unsigned int idx;
+};
+
+struct hist_trigger_sort_entry {
+ void *key;
+ struct hist_trigger_entry *entry;
+ bool entry_copied;
+ bool dup;
+};
+
+struct hist_trigger_entry {
+ struct hist_trigger_data *hist_data;
+ void *key;
+ atomic64_t hitcount;
+ atomic64_t sums[HIST_VALS_MAX];
+ char *comm;
+};
+
+struct tracing_map_entry {
+ u32 key;
+ struct hist_trigger_entry *val;
+};
+
+struct tracing_map {
+ unsigned int key_size;
+ unsigned int map_bits;
+ unsigned int map_size;
+ unsigned int max_entries;
+ atomic_t next_entry;
+ struct tracing_map_entry *map;
+ struct hist_trigger_entry *entries;
+};
+
+struct hist_trigger_attrs {
+ char *keys_str;
+ char *vals_str;
+ char *sort_keys_str;
+ bool pause;
+ bool cont;
+ bool clear;
+ unsigned int map_bits;
+};
+
+struct hist_trigger_data {
+ atomic64_t total_hits;
+ struct hist_field *key;
+ struct hist_field *vals[HIST_VALS_MAX];
+ unsigned int n_vals;
+ struct ftrace_event_file *event_file;
+ atomic64_t drops;
+ struct hist_trigger_attrs *attrs;
+ struct hist_trigger_sort_key *sort_keys[HIST_SORT_KEYS_MAX];
+ struct hist_trigger_sort_key *sort_key_cur;
+ struct tracing_map *map;
+};
+
+#define HIST_STACKTRACE_DEPTH 16
+#define HIST_STACKTRACE_SKIP 5
+
+static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
+{
+ hist_field_fn_t fn = NULL;
+
+ switch (field_size) {
+ case 8:
+ if (field_is_signed)
+ fn = hist_field_s64;
+ else
+ fn = hist_field_u64;
+ break;
+ case 4:
+ if (field_is_signed)
+ fn = hist_field_s32;
+ else
+ fn = hist_field_u32;
+ break;
+ case 2:
+ if (field_is_signed)
+ fn = hist_field_s16;
+ else
+ fn = hist_field_u16;
+ break;
+ case 1:
+ if (field_is_signed)
+ fn = hist_field_s8;
+ else
+ fn = hist_field_u8;
+ break;
+ }
+
+ return fn;
+}
+
+static inline void save_comm(char *comm, struct task_struct *task)
+{
+ if (!task->pid) {
+ strcpy(comm, "<idle>");
+ return;
+ }
+
+ if (WARN_ON_ONCE(task->pid < 0)) {
+ strcpy(comm, "<XXX>");
+ return;
+ }
+
+ if (task->pid > PID_MAX_DEFAULT) {
+ strcpy(comm, "<...>");
+ return;
+ }
+
+ memcpy(comm, task->comm, TASK_COMM_LEN);
+}
+
+static void destroy_hist_field(struct hist_field *hist_field)
+{
+ kfree(hist_field);
+}
+
+static struct hist_field *create_hist_field(struct ftrace_event_field *field,
+ unsigned long flags)
+{
+ hist_field_fn_t fn = hist_field_none;
+ struct hist_field *hist_field;
+
+ hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
+ if (!hist_field)
+ return NULL;
+
+ if (flags & HIST_FIELD_STACKTRACE) {
+ hist_field->flags = flags;
+ goto out;
+ }
+
+ if (is_function_field(field))
+ goto free;
+
+ if (is_string_field(field)) {
+ flags |= HIST_FIELD_STRING;
+ fn = hist_field_string;
+ } else {
+ fn = select_value_fn(field->size, field->is_signed);
+ if (!fn)
+ goto free;
+ }
+
+ hist_field->field = field;
+ hist_field->flags = flags;
+ hist_field->fn = fn;
+ out:
+ return hist_field;
+ free:
+ kfree(hist_field);
+ hist_field = NULL;
+
+ goto out;
+}
+
+static void destroy_hist_fields(struct hist_trigger_data *hist_data)
+{
+ unsigned int i;
+
+ destroy_hist_field(hist_data->key);
+ hist_data->key = NULL;
+
+ for (i = 0; i < hist_data->n_vals; i++) {
+ destroy_hist_field(hist_data->vals[i]);
+ hist_data->vals[i] = NULL;
+ }
+}
+
+static inline struct hist_trigger_sort_key *create_default_sort_key(void)
+{
+ struct hist_trigger_sort_key *sort_key;
+
+ sort_key = kzalloc(sizeof(*sort_key), GFP_KERNEL);
+ if (!sort_key)
+ return ERR_PTR(-ENOMEM);
+
+ sort_key->use_hitcount = true;
+
+ return sort_key;
+}
+
+static inline struct hist_trigger_sort_key *
+create_sort_key(char *field_name, struct hist_trigger_data *hist_data)
+{
+ struct hist_trigger_sort_key *sort_key;
+ unsigned int i;
+
+ if (!strcmp(field_name, "hitcount"))
+ return create_default_sort_key();
+
+ for (i = 0; i < hist_data->n_vals; i++)
+ if (!strcmp(field_name, hist_data->vals[i]->field->name))
+ goto out;
+
+ return ERR_PTR(-EINVAL);
+ out:
+ sort_key = kzalloc(sizeof(*sort_key), GFP_KERNEL);
+ if (!sort_key)
+ return ERR_PTR(-ENOMEM);
+
+ sort_key->idx = i;
+
+ return sort_key;
+}
+
+static int create_sort_keys(struct hist_trigger_data *hist_data)
+{
+ char *fields_str = hist_data->attrs->sort_keys_str;
+ struct hist_trigger_sort_key *sort_key;
+ char *field_str, *field_name;
+ unsigned int i;
+ int ret = 0;
+
+ if (!fields_str) {
+ sort_key = create_default_sort_key();
+ if (IS_ERR(sort_key)) {
+ ret = PTR_ERR(sort_key);
+ goto out;
+ }
+ hist_data->sort_keys[0] = sort_key;
+ goto out;
+ }
+
+ strsep(&fields_str, "=");
+ if (!fields_str) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ for (i = 0; i < HIST_SORT_KEYS_MAX; i++) {
+ field_str = strsep(&fields_str, ",");
+ if (!field_str) {
+ if (i == 0) {
+ ret = -EINVAL;
+ goto free;
+ } else
+ break;
+ }
+
+ field_name = strsep(&field_str, ".");
+ sort_key = create_sort_key(field_name, hist_data);
+ if (IS_ERR(sort_key)) {
+ ret = PTR_ERR(sort_key);
+ goto free;
+ }
+
+ if (field_str) {
+ if (!strcmp(field_str, "descending"))
+ sort_key->descending = true;
+ else if (strcmp(field_str, "ascending")) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+ hist_data->sort_keys[i] = sort_key;
+ }
+ out:
+ return ret;
+ free:
+ for (i = 0; i < HIST_SORT_KEYS_MAX; i++) {
+ if (!hist_data->sort_keys[i])
+ break;
+ kfree(hist_data->sort_keys[i]);
+ hist_data->sort_keys[i] = NULL;
+ }
+ goto out;
+}
+
+static int create_key_field(struct hist_trigger_data *hist_data,
+ struct ftrace_event_file *file,
+ char *field_str)
+{
+ struct ftrace_event_field *field = NULL;
+ unsigned long flags = 0;
+ char *field_name;
+ int ret = 0;
+
+ if (!strcmp(field_str, "stacktrace"))
+ flags |= HIST_FIELD_STACKTRACE;
+ else {
+ field_name = strsep(&field_str, ".");
+ if (field_str) {
+ if (!strcmp(field_str, "sym"))
+ flags |= HIST_FIELD_SYM;
+ else if (!strcmp(field_str, "hex"))
+ flags |= HIST_FIELD_HEX;
+ else if (!strcmp(field_str, "execname") &&
+ !strcmp(field_name, "common_pid"))
+ flags |= HIST_FIELD_EXECNAME;
+ else if (!strcmp(field_str, "syscall"))
+ flags |= HIST_FIELD_SYSCALL;
+ }
+
+ field = trace_find_event_field(file->event_call, field_name);
+ if (!field) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ hist_data->key = create_hist_field(field, flags);
+ if (!hist_data->key)
+ ret = -ENOMEM;
+ out:
+ return ret;
+}
+
+static int create_val_field(struct hist_trigger_data *hist_data,
+ unsigned int val,
+ struct ftrace_event_file *file,
+ char *field_str)
+{
+ struct ftrace_event_field *field = NULL;
+ unsigned long flags = 0;
+ char *field_name;
+ int ret = 0;
+
+ if (!strcmp(field_str, "hitcount"))
+ return ret;
+
+ field_name = strsep(&field_str, ".");
+ if (field_str) {
+ if (!strcmp(field_str, "sym"))
+ flags |= HIST_FIELD_SYM;
+ else if (!strcmp(field_str, "hex"))
+ flags |= HIST_FIELD_HEX;
+ }
+
+ field = trace_find_event_field(file->event_call, field_name);
+ if (!field) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hist_data->vals[val] = create_hist_field(field, flags);
+ if (!hist_data->vals[val]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ hist_data->n_vals++;
+ out:
+ return ret;
+}
+
+static int create_hist_fields(struct hist_trigger_data *hist_data,
+ struct ftrace_event_file *file)
+{
+ char *fields_str, *field_str;
+ unsigned int i;
+ int ret;
+
+ fields_str = hist_data->attrs->keys_str;
+ strsep(&fields_str, "=");
+ if (!fields_str) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = create_key_field(hist_data, file, fields_str);
+ if (ret)
+ goto out;
+
+ fields_str = hist_data->attrs->vals_str;
+ strsep(&fields_str, "=");
+ if (!fields_str) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < HIST_VALS_MAX; i++) {
+ field_str = strsep(&fields_str, ",");
+ if (!field_str) {
+ if (i == 0) {
+ ret = -EINVAL;
+ goto out;
+ } else
+ break;
+ }
+ ret = create_val_field(hist_data, i, file, field_str);
+ if (ret)
+ goto out;
+ }
+
+ ret = create_sort_keys(hist_data);
+ out:
+ return ret;
+}
+
+static u32 get_key_size(struct hist_trigger_data *hist_data)
+{
+ u32 key_size;
+
+ if (hist_data->key->flags & HIST_FIELD_STACKTRACE)
+ key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
+ else
+ key_size = hist_data->key->field->size;
+
+ return key_size;
+}
+
+static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
+{
+ kfree(attrs->sort_keys_str);
+ kfree(attrs->keys_str);
+ kfree(attrs->vals_str);
+ kfree(attrs);
+}
+
+static int parse_map_size(char *str)
+{
+ unsigned long size, map_bits;
+ int ret;
+
+ strsep(&str, "=");
+ if (!str) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtoul(str, 0, &size);
+ if (ret)
+ goto out;
+
+ map_bits = ilog2(roundup_pow_of_two(size));
+ if (map_bits < HIST_TRIGGER_BITS_MIN ||
+ map_bits > HIST_TRIGGER_BITS_MAX)
+ ret = -EINVAL;
+ else
+ ret = map_bits;
+ out:
+ return ret;
+}
+
+static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
+{
+ struct hist_trigger_attrs *attrs;
+ int ret = 0;
+
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ while (trigger_str) {
+ char *str = strsep(&trigger_str, ":");
+
+ if (!strncmp(str, "keys", strlen("keys")) ||
+ !strncmp(str, "key", strlen("key")))
+ attrs->keys_str = kstrdup(str, GFP_KERNEL);
+ else if (!strncmp(str, "values", strlen("values")) ||
+ !strncmp(str, "vals", strlen("vals")) ||
+ !strncmp(str, "val", strlen("val")))
+ attrs->vals_str = kstrdup(str, GFP_KERNEL);
+ else if (!strncmp(str, "sort", strlen("sort")))
+ attrs->sort_keys_str = kstrdup(str, GFP_KERNEL);
+ else if (!strncmp(str, "pause", strlen("pause")))
+ attrs->pause = true;
+ else if (!strncmp(str, "continue", strlen("continue")) ||
+ !strncmp(str, "cont", strlen("cont")))
+ attrs->cont = true;
+ else if (!strncmp(str, "clear", strlen("clear")))
+ attrs->clear = true;
+ else if (!strncmp(str, "size", strlen("size"))) {
+ int map_bits = parse_map_size(str);
+
+ if (map_bits < 0) {
+ ret = map_bits;
+ goto free;
+ }
+ attrs->map_bits = map_bits;
+ } else {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ return attrs;
+ free:
+ destroy_hist_trigger_attrs(attrs);
+
+ return ERR_PTR(ret);
+}
+
+static struct hist_trigger_entry *
+hist_trigger_entry_create(struct tracing_map *map)
+{
+ struct hist_trigger_entry *entry = NULL;
+ int idx;
+
+ idx = atomic_inc_return(&map->next_entry);
+ if (idx < map->max_entries) {
+ entry = &map->entries[idx];
+ if (entry->comm)
+ save_comm(entry->comm, current);
+ }
+
+ return entry;
+}
+
+static inline bool keys_match(void *key, void *test_key, unsigned key_size)
+{
+ bool match = true;
+
+ if (memcmp(key, test_key, key_size))
+ match = false;
+
+ return match;
+}
+
+/**
+ * tracing_map_insert - Insert key and/or return associated val
+ * @map: The tracing_map to insert into
+ * @key: The key to insert
+ *
+ * Inserts a key into the map and creates and returns a new
+ * hist_trigger_entry for it, or if the key has already been inserted
+ * by a previous call, returns the hist_trigger_entry already
+ * associated with it. When the map was created, the number of
+ * entries for the map was specified, and that number of
+ * hist_trigger_entries was created at the same time. This is the
+ * pre-allocated pool of hist trigger entries that
+ * tracing_map_insert() will allocate from when adding new keys. Once
+ * that pool is exhausted, tracing_map_insert() is useless and will
+ * return NULL to signal that state.
+ *
+ * This is a lock-free tracing map insertion function implementing a
+ * modified form of Cliff Click's basic insertion algorithm. It
+ * requires the table size to be a power of two. To prevent any
+ * possibility of an infinite loop we always make the actual table
+ * size double the size of the requested table size (max_size * 2).
+ * Likewise, we never reuse a slot or resize or delete elements - when
+ * we've reached map_size entries, we simply return NULL once we've
+ * run out of entries. Readers can at any point in time traverse the
+ * tracing map and safely access the key/val pairs.
+ *
+ * Return: the hist_trigger_entry * val associated with the key. If
+ * this was a newly inserted key, the val will be a newly allocated
+ * and associated hist_trigger_entry * val. If the key wasn't found
+ * and the pool of hist_trigger_entries has been exhausted, NULL is
+ * returned and no further insertions will succeed.
+ */
+static struct hist_trigger_entry *
+tracing_map_insert(struct tracing_map *map, void *key)
+{
+ u32 idx, key_hash, test_key;
+
+ key_hash = jhash(key, map->key_size, 0);
+ idx = key_hash >> (32 - (map->map_bits + 1));
+
+ while (1) {
+ idx &= (map->map_size - 1);
+ test_key = map->map[idx].key;
+
+ if (test_key && test_key == key_hash && map->map[idx].val &&
+ keys_match(key, map->map[idx].val->key, map->key_size))
+ return map->map[idx].val;
+
+ if (!test_key && !cmpxchg(&map->map[idx].key, 0, key_hash)) {
+ struct hist_trigger_entry *entry;
+
+ entry = hist_trigger_entry_create(map);
+ if (!entry)
+ break;
+ memcpy(entry->key, key, map->key_size);
+ map->map[idx].val = entry;
+
+ return map->map[idx].val;
+ }
+
+ idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * tracing_map_destroy - Destroy a tracing_map
+ * @map: The tracing_map to destroy
+ *
+ * Frees a tracing_map along with its associated array of
+ * hist_trigger_entries.
+ *
+ * Callers should make sure there are no readers or writers actively
+ * reading or inserting into the map before calling this.
+ */
+static void tracing_map_destroy(struct tracing_map *map)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ if (map->entries) {
+ for (i = 0; i < map->max_entries; i++) {
+ kfree(map->entries[i].key);
+ kfree(map->entries[i].comm);
+ }
+ }
+
+ kfree(map->entries);
+ kfree(map->map);
+ kfree(map);
+}
+
+/**
+ * tracing_map_clear - Clear a tracing_map
+ * @map: The tracing_map to clear
+ *
+ * Resets the tracing map to a cleared or initial state. The
+ * tracing_map_entries are all cleared, and the array of
+ * hist_trigger_entries are reset to an initialized state.
+ *
+ * Callers should make sure there are no writers actively inserting
+ * into the map before calling this.
+ */
+static void tracing_map_clear(struct tracing_map *map)
+{
+ unsigned int i, j, size;
+
+ atomic_set(&map->next_entry, -1);
+
+ size = map->map_size * sizeof(struct tracing_map_entry);
+ memset(map->map, 0, size);
+
+ for (i = 0; i < map->max_entries; i++) {
+ atomic64_set(&map->entries[i].hitcount, 0);
+ for (j = 0; j < HIST_VALS_MAX; j++)
+ atomic64_set(&map->entries[i].sums[j], 0);
+ }
+}
+
+/**
+ * tracing_map_create - Create a lock-free map and hist entry pool
+ * @hist_data: The hist_trigger_data to be associated with the map
+ * @map_bits: The size of the map (2 ** map_bits)
+ * @key_size: The size of the key for the map in bytes
+ * @comm: boolean, do the entries need space for a comm string?
+ *
+ * Creates and sets up a map to contain a max_size number of entries
+ * equal to a size of 2 ** map_bits. It also creates an array of
+ * (hist_trigger_entry) of the same size in order to avoid allocating
+ * anything in the map insertion path. These sizes reflect the number
+ * of entries requested by the user - internally we double that in
+ * order to keep the table sparse and keep collisions manageable.
+ *
+ * Return: the tracing_map * if successful, ERR_PTR if not.
+ */
+static struct tracing_map *
+tracing_map_create(struct hist_trigger_data *hist_data, unsigned int map_bits,
+ unsigned int key_size, bool comm)
+{
+ struct tracing_map *map;
+ unsigned int i, size;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ map->map_bits = map_bits;
+ map->max_entries = (1 << map_bits);
+ atomic_set(&map->next_entry, -1);
+
+ map->map_size = (1 << (map_bits + 1));
+ map->key_size = key_size;
+
+ size = map->map_size * sizeof(struct tracing_map_entry);
+ map->map = kzalloc(size, GFP_KERNEL);
+ if (!map->map)
+ goto free;
+
+ size = map->max_entries * sizeof(struct hist_trigger_entry);
+ map->entries = kzalloc(size, GFP_KERNEL);
+ if (!map->entries)
+ goto free;
+
+ for (i = 0; i < map->max_entries; i++) {
+ map->entries[i].hist_data = hist_data;
+ map->entries[i].key = kzalloc(key_size, GFP_KERNEL);
+ if (!map->entries[i].key)
+ goto free;
+
+ if (comm) {
+ size = TASK_COMM_LEN + 1;
+ map->entries[i].comm = kzalloc(size, GFP_KERNEL);
+ if (!map->entries[i].comm)
+ goto free;
+ }
+ }
+ tracing_map_clear(map);
+ out:
+ return map;
+ free:
+ tracing_map_destroy(map);
+ map = ERR_PTR(-ENOMEM);
+
+ goto out;
+}
+
+static void destroy_hist_data(struct hist_trigger_data *hist_data)
+{
+ destroy_hist_trigger_attrs(hist_data->attrs);
+ destroy_hist_fields(hist_data);
+ tracing_map_destroy(hist_data->map);
+ kfree(hist_data);
+}
+
+static struct hist_trigger_data *
+create_hist_data(unsigned int map_bits,
+ struct hist_trigger_attrs *attrs,
+ struct ftrace_event_file *file)
+{
+ struct hist_trigger_data *hist_data;
+ int ret = 0;
+ bool comm;
+
+ hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
+ if (!hist_data)
+ return NULL;
+
+ hist_data->attrs = attrs;
+
+ ret = create_hist_fields(hist_data, file);
+ if (ret < 0)
+ goto free;
+
+ comm = hist_data->key->flags & HIST_FIELD_EXECNAME;
+
+ hist_data->map = tracing_map_create(hist_data, map_bits,
+ get_key_size(hist_data),
+ comm);
+ if (IS_ERR(hist_data->map)) {
+ ret = PTR_ERR(hist_data->map);
+ hist_data->map = NULL;
+ goto free;
+ }
+
+ hist_data->event_file = file;
+ out:
+ return hist_data;
+ free:
+ destroy_hist_data(hist_data);
+ if (ret)
+ hist_data = ERR_PTR(ret);
+ else
+ hist_data = NULL;
+
+ goto out;
+}
+
+static void hist_trigger_entry_update(struct hist_trigger_data *hist_data,
+ struct hist_trigger_entry *entry,
+ void *rec)
+{
+ struct hist_field *hist_field;
+ unsigned int i;
+ u64 hist_val;
+
+ for (i = 0; i < hist_data->n_vals; i++) {
+ hist_field = hist_data->vals[i];
+ hist_val = hist_field->fn(hist_field, rec);
+ atomic64_add(hist_val, &entry->sums[i]);
+ }
+
+ atomic64_inc(&entry->hitcount);
+}
+
+static void event_hist_trigger(struct event_trigger_data *data, void *rec)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ unsigned long entries[HIST_STACKTRACE_DEPTH];
+ struct hist_trigger_entry *entry;
+ struct stack_trace stacktrace;
+ u64 field_contents;
+ void *key;
+
+ if (atomic64_read(&hist_data->drops)) {
+ atomic64_inc(&hist_data->drops);
+ return;
+ }
+
+ if (hist_data->key->flags & HIST_FIELD_STACKTRACE) {
+ stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
+ stacktrace.entries = entries;
+ stacktrace.nr_entries = 0;
+ stacktrace.skip = HIST_STACKTRACE_SKIP;
+
+ memset(stacktrace.entries, 0, hist_data->map->key_size);
+ save_stack_trace(&stacktrace);
+
+ key = entries;
+ } else {
+ field_contents = hist_data->key->fn(hist_data->key, rec);
+ if (hist_data->key->flags & HIST_FIELD_STRING)
+ key = (void *)field_contents;
+ else
+ key = (void *)&field_contents;
+ }
+
+ entry = tracing_map_insert(hist_data->map, key);
+ if (entry)
+ hist_trigger_entry_update(hist_data, entry, rec);
+ else
+ atomic64_inc(&hist_data->drops);
+
+ atomic64_inc(&hist_data->total_hits);
+}
+
+static void hist_trigger_stacktrace_print(struct seq_file *m,
+ unsigned long *stacktrace_entries,
+ unsigned int max_entries)
+{
+ char str[KSYM_SYMBOL_LEN];
+ unsigned int spaces = 8;
+ unsigned int i;
+
+ for (i = 0; i < max_entries; i++) {
+ if (stacktrace_entries[i] == ULONG_MAX)
+ return;
+
+ seq_printf(m, "%*c", 1 + spaces, ' ');
+ sprint_symbol(str, stacktrace_entries[i]);
+ seq_printf(m, "%s\n", str);
+ }
+}
+
+/*
+ * Print one histogram entry: the key (formatted according to the key
+ * field's modifier flags), the hitcount, and each value field's sum.
+ */
+static void hist_trigger_entry_print(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ void *key,
+ struct hist_trigger_entry *entry)
+{
+ char str[KSYM_SYMBOL_LEN];
+ unsigned int i;
+ u64 uval;
+
+ if (entry->hist_data->key->flags & HIST_FIELD_SYM) {
+ /* .sym modifier: resolve the key to a symbol name */
+ uval = *(u64 *)key;
+ kallsyms_lookup(uval, NULL, NULL, NULL, str);
+ seq_printf(m, "%s: [%llx] %-35s", hist_data->key->field->name,
+ uval, str);
+ } else if (entry->hist_data->key->flags & HIST_FIELD_HEX) {
+ uval = *(u64 *)key;
+ seq_printf(m, "%s: %llx", hist_data->key->field->name, uval);
+ } else if (entry->hist_data->key->flags & HIST_FIELD_STACKTRACE) {
+ seq_puts(m, "stacktrace:\n");
+ hist_trigger_stacktrace_print(m, key, HIST_STACKTRACE_DEPTH);
+ } else if (entry->hist_data->key->flags & HIST_FIELD_STRING) {
+ seq_printf(m, "%s: %-35s", hist_data->key->field->name,
+ (char *)key);
+ } else if (entry->hist_data->key->flags & HIST_FIELD_EXECNAME) {
+ /* .execname: show the saved comm alongside the pid key */
+ uval = *(u64 *)key;
+ seq_printf(m, "%s: %-16s[%10llu]", hist_data->key->field->name,
+ entry->comm, uval);
+ } else if (entry->hist_data->key->flags & HIST_FIELD_SYSCALL) {
+ const char *syscall_name;
+
+ uval = *(u64 *)key;
+ syscall_name = get_syscall_name(uval);
+ if (!syscall_name)
+ syscall_name = "unknown_syscall";
+
+ seq_printf(m, "%s: %-30s", hist_data->key->field->name,
+ syscall_name);
+ } else {
+ /* no modifier: plain decimal */
+ uval = *(u64 *)key;
+ seq_printf(m, "%s: %10llu", hist_data->key->field->name, uval);
+ }
+
+ seq_printf(m, " hitcount: %10llu",
+ (u64)atomic64_read(&entry->hitcount));
+
+ for (i = 0; i < hist_data->n_vals; i++) {
+ seq_printf(m, " %s: %10llu", hist_data->vals[i]->field->name,
+ (u64)atomic64_read(&entry->sums[i]));
+ }
+
+ seq_puts(m, "\n");
+}
+
+/*
+ * sort() comparator for hist sort entries.
+ *
+ * Two modes, selected via hist_data->sort_key_cur: when use_key is
+ * set (the duplicate-detection pass) entries are ordered by their raw
+ * key bytes; otherwise by hitcount or by one of the value sums,
+ * optionally descending.
+ */
+static int cmp_entries(const struct hist_trigger_sort_entry **a,
+ const struct hist_trigger_sort_entry **b)
+{
+ const struct hist_trigger_entry *entry_a, *entry_b;
+ struct hist_trigger_sort_key *sort_key;
+ struct hist_trigger_data *hist_data;
+ u64 val_a, val_b;
+ int ret = 0;
+
+ entry_a = (*a)->entry;
+ entry_b = (*b)->entry;
+
+ hist_data = entry_a->hist_data;
+ sort_key = hist_data->sort_key_cur;
+
+ if (sort_key->use_key) {
+ /*
+ * Use the sign of memcmp() so this is a consistent total
+ * order: always returning 1 for unequal keys is not
+ * antisymmetric, and sort() could then fail to place
+ * identical keys adjacently for merge_dups().
+ */
+ return memcmp((*a)->key, (*b)->key, hist_data->map->key_size);
+ }
+
+ if (sort_key->use_hitcount) {
+ val_a = atomic64_read(&entry_a->hitcount);
+ val_b = atomic64_read(&entry_b->hitcount);
+ } else {
+ val_a = atomic64_read(&entry_a->sums[sort_key->idx]);
+ val_b = atomic64_read(&entry_b->sums[sort_key->idx]);
+ }
+
+ if (val_a > val_b)
+ ret = 1;
+ else if (val_a < val_b)
+ ret = -1;
+
+ if (sort_key->descending)
+ ret = -ret;
+
+ return ret;
+}
+
+/*
+ * Fallback display path: walk the map in slot order and print every
+ * occupied entry, without sorting or duplicate merging.
+ */
+static void print_entries_unsorted(struct seq_file *m,
+ struct hist_trigger_data *hist_data)
+{
+ struct tracing_map *map = hist_data->map;
+ int idx;
+
+ for (idx = 0; idx < map->map_size; idx++) {
+ if (!map->map[idx].key)
+ continue;
+
+ hist_trigger_entry_print(m, hist_data,
+ map->map[idx].val->key,
+ map->map[idx].val);
+ }
+}
+
+/*
+ * Free a sort entry and its private key copy.  If the wrapped hist
+ * entry is itself a merge_dup() copy (entry_copied), free it and its
+ * buffers too; otherwise the entry belongs to the live map and is
+ * left alone.  NULL is tolerated.
+ */
+static void destroy_sort_entry(struct hist_trigger_sort_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->entry_copied) {
+ kfree(entry->entry->key);
+ kfree(entry->entry->comm);
+ kfree(entry->entry);
+ }
+
+ kfree(entry->key);
+ kfree(entry);
+}
+
+/* Free every sort entry in an array built by print_entries(). */
+static void destroy_sort_entries(struct hist_trigger_sort_entry **entries,
+ unsigned int entries_size)
+{
+ while (entries_size--)
+ destroy_sort_entry(entries[entries_size]);
+}
+
+/*
+ * Allocate a sort entry wrapping @entry, with its own copy of @key so
+ * the key can outlive the map slot.  Returns NULL on allocation
+ * failure.
+ */
+static struct hist_trigger_sort_entry *
+create_sort_entry(void *key, struct hist_trigger_entry *entry)
+{
+ unsigned int key_size = entry->hist_data->map->key_size;
+ struct hist_trigger_sort_entry *sort_entry;
+
+ sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
+ if (!sort_entry)
+ return NULL;
+
+ sort_entry->key = kzalloc(key_size, GFP_KERNEL);
+ if (!sort_entry->key) {
+ destroy_sort_entry(sort_entry);
+ return NULL;
+ }
+
+ memcpy(sort_entry->key, key, key_size);
+ sort_entry->entry = entry;
+
+ return sort_entry;
+}
+
+/*
+ * Allocate an empty hist_trigger_entry shaped like @entry: the struct
+ * itself, a key buffer sized for the map's keys, and a comm buffer if
+ * @entry carries one.  The caller (copy_entry()) fills in the
+ * contents.  Returns NULL on allocation failure, with any partial
+ * allocations freed.
+ *
+ * NOTE(review): assumes sums[] is a fixed-size array member of
+ * struct hist_trigger_entry -- confirm; a flexible array member would
+ * make this allocation too small for copy_entry()'s sums copy.
+ */
+static struct hist_trigger_entry *alloc_entry(struct hist_trigger_entry *entry)
+{
+ struct hist_trigger_entry *dup_entry;
+ unsigned int size;
+
+ size = sizeof(struct hist_trigger_entry);
+
+ dup_entry = kzalloc(size, GFP_KERNEL);
+ if (!dup_entry)
+ return NULL;
+
+ size = entry->hist_data->map->key_size;
+ dup_entry->key = kzalloc(size, GFP_KERNEL);
+ if (!dup_entry->key)
+ goto free;
+
+ if (entry->comm) {
+ /* +1 keeps the TASK_COMM_LEN-byte copy NUL-terminated */
+ size = TASK_COMM_LEN + 1;
+ dup_entry->comm = kzalloc(size, GFP_KERNEL);
+ if (!dup_entry->comm)
+ goto free;
+ }
+
+ return dup_entry;
+ free:
+ kfree(dup_entry->key);
+ kfree(dup_entry->comm);
+ kfree(dup_entry);
+
+ return NULL;
+}
+
+/*
+ * Deep-copy @entry (key, comm, hitcount, and value sums) into a
+ * freshly allocated entry so duplicate merging can accumulate sums
+ * without modifying the live map entry.  Returns NULL on allocation
+ * failure.
+ */
+static struct hist_trigger_entry *copy_entry(struct hist_trigger_entry *entry)
+{
+ struct hist_trigger_entry *dup_entry;
+ unsigned int i;
+
+ dup_entry = alloc_entry(entry);
+ if (!dup_entry)
+ return NULL;
+
+ dup_entry->hist_data = entry->hist_data;
+
+ memcpy(dup_entry->key, entry->key, entry->hist_data->map->key_size);
+
+ if (entry->comm)
+ memcpy(dup_entry->comm, entry->comm, TASK_COMM_LEN);
+
+ atomic64_set(&dup_entry->hitcount, atomic64_read(&entry->hitcount));
+
+ for (i = 0; i < entry->hist_data->n_vals; i++)
+ atomic64_set(&dup_entry->sums[i],
+ atomic64_read(&entry->sums[i]));
+
+ return dup_entry;
+}
+
+/*
+ * Fold the duplicate sort entry at index @dup into the one at @target.
+ *
+ * On the first duplicate of a run the target's entry is replaced by a
+ * heap copy (marked entry_copied) so the live map entry's sums are
+ * never modified; later duplicates accumulate into that copy.  The
+ * duplicate itself is only marked here; merge_dups() compacts it away.
+ *
+ * Note: callers pass target < dup (target == dup - run_length), so
+ * the first-duplicate test must be (dup - target) == 1; the original
+ * (target - dup) == 1 could never be true with unsigned arithmetic,
+ * leaving first_dup permanently false and mutating live map entries.
+ */
+static int merge_dup(struct hist_trigger_sort_entry **sort_entries,
+ unsigned int target, unsigned int dup)
+{
+ struct hist_trigger_entry *target_entry, *entry;
+ bool first_dup = (dup - target) == 1;
+ int i;
+
+ if (first_dup) {
+ entry = sort_entries[target]->entry;
+ target_entry = copy_entry(entry);
+ if (!target_entry)
+ return -ENOMEM;
+ sort_entries[target]->entry = target_entry;
+ sort_entries[target]->entry_copied = true;
+ } else
+ target_entry = sort_entries[target]->entry;
+
+ entry = sort_entries[dup]->entry;
+
+ atomic64_add(atomic64_read(&entry->hitcount), &target_entry->hitcount);
+
+ for (i = 0; i < entry->hist_data->n_vals; i++)
+ atomic64_add(atomic64_read(&entry->sums[i]),
+ &target_entry->sums[i]);
+
+ sort_entries[dup]->dup = true;
+
+ return 0;
+}
+
+/*
+ * Detect and merge sort entries that share the same key.
+ *
+ * The array is first sorted by raw key bytes (use_key) so equal keys
+ * become adjacent, each run of duplicates is folded into its first
+ * occurrence, and the array is then compacted in place.  Returns the
+ * number of merged duplicates, or a negative errno on allocation
+ * failure.
+ */
+static int merge_dups(struct hist_trigger_sort_entry **sort_entries,
+ int n_entries, struct hist_trigger_data *hist_data)
+{
+ unsigned int key_size, dups = 0, total_dups = 0;
+ int err, i, j;
+ void *key;
+
+ if (n_entries < 2)
+ return total_dups;
+
+ /* sort by key so duplicates end up adjacent */
+ hist_data->sort_key_cur->use_key = true;
+ /* element size is the sort_entry pointer, not the entry pointer */
+ sort(sort_entries, n_entries, sizeof(*sort_entries),
+ (int (*)(const void *, const void *))cmp_entries, NULL);
+ hist_data->sort_key_cur->use_key = false;
+
+ key_size = hist_data->map->key_size;
+
+ key = sort_entries[0]->key;
+ for (i = 1; i < n_entries; i++) {
+ if (!memcmp(sort_entries[i]->key, key, key_size)) {
+ dups++;
+ total_dups++;
+ /* fold entry i into the first entry of this run */
+ err = merge_dup(sort_entries, i - dups, i);
+ if (err)
+ return err;
+ continue;
+ }
+ key = sort_entries[i]->key;
+ dups = 0;
+ }
+
+ if (!total_dups)
+ return total_dups;
+
+ /* compact: slide unique entries down over the merged duplicates */
+ for (i = 0, j = 0; i < n_entries; i++) {
+ if (!sort_entries[i]->dup) {
+ sort_entries[j] = sort_entries[i];
+ if (j++ != i)
+ sort_entries[i] = NULL;
+ } else {
+ destroy_sort_entry(sort_entries[i]);
+ sort_entries[i] = NULL;
+ }
+ }
+
+ return total_dups;
+}
+
+/*
+ * Build a snapshot of all occupied map entries, merge duplicate keys,
+ * sort by the current sort key, and print the result.  Returns the
+ * number of duplicates merged (>= 0), or a negative errno on
+ * allocation failure, in which case the caller falls back to the
+ * unsorted display.
+ */
+static int print_entries(struct seq_file *m,
+ struct hist_trigger_data *hist_data)
+{
+ struct hist_trigger_sort_entry **sort_entries;
+ struct tracing_map *map = hist_data->map;
+ int i, j, n_entries, ret;
+
+ /* one slot per possible entry; the map caps insertions at max_entries */
+ sort_entries = kcalloc(map->max_entries, sizeof(*sort_entries),
+ GFP_KERNEL);
+ if (!sort_entries)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < map->map_size; i++) {
+ if (!map->map[i].key || !map->map[i].val)
+ continue;
+
+ sort_entries[j] = create_sort_entry(map->map[i].val->key,
+ map->map[i].val);
+ if (!sort_entries[j++]) {
+ ret = -ENOMEM;
+ n_entries = j;
+ goto free;
+ }
+ }
+ n_entries = j;
+
+ hist_data->sort_key_cur = hist_data->sort_keys[0];
+
+ ret = merge_dups(sort_entries, n_entries, hist_data);
+ if (ret < 0)
+ goto free;
+ j -= ret; /* entries remaining after duplicate compaction */
+
+ sort(sort_entries, j, sizeof(*sort_entries),
+ (int (*)(const void *, const void *))cmp_entries, NULL);
+
+ for (i = 0; i < j; i++)
+ hist_trigger_entry_print(m, hist_data,
+ sort_entries[i]->key,
+ sort_entries[i]->entry);
+ free:
+ destroy_sort_entries(sort_entries, n_entries);
+ /* destroy_sort_entries() frees the elements, not the array itself */
+ kfree(sort_entries);
+
+ return ret;
+}
+
+/*
+ * seq_file show function for the per-event 'hist' file: prints the
+ * trigger description, the sorted histogram, and the totals footer.
+ */
+static int hist_show(struct seq_file *m, void *v)
+{
+ struct event_trigger_data *test, *data = NULL;
+ struct ftrace_event_file *event_file;
+ struct hist_trigger_data *hist_data;
+ int entries, dups, ret = 0;
+
+ mutex_lock(&event_mutex);
+
+ event_file = event_file_data(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* find the hist trigger, if any, on this event */
+ list_for_each_entry_rcu(test, &event_file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ data = test;
+ break;
+ }
+ }
+ if (!data)
+ goto out_unlock;
+
+ seq_puts(m, "# trigger info: ");
+ data->ops->print(m, data->ops, data);
+ seq_puts(m, "\n");
+
+ hist_data = data->private_data;
+ dups = print_entries(m, hist_data);
+ if (dups < 0) {
+ /* sorting failed (OOM); fall back to raw map order */
+ print_entries_unsorted(m, hist_data);
+ dups = 0;
+ }
+
+ entries = atomic_read(&hist_data->map->next_entry) + 1;
+ if (entries > hist_data->map->max_entries)
+ entries = hist_data->map->max_entries;
+
+ /*
+ * atomic64_read() returns a 64-bit value; use %llu with explicit
+ * u64 casts (as the entry printer does) so the format is also
+ * correct on 32-bit architectures, where %lu would be wrong.
+ */
+ seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
+ (u64)atomic64_read(&hist_data->total_hits),
+ entries - dups,
+ (u64)atomic64_read(&hist_data->drops));
+ out_unlock:
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+/* open() for the per-event 'hist' file; the file becomes seq private data */
+static int event_hist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hist_show, file);
+}
+
+/* file_operations backing the per-event 'hist' file */
+const struct file_operations event_hist_fops = {
+ .open = event_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Map a field's modifier flag to its command-string suffix, or NULL
+ * if the field has no display modifier.
+ */
+static const char *get_hist_field_flags(struct hist_field *hist_field)
+{
+ if (hist_field->flags & HIST_FIELD_SYM)
+ return "sym";
+ if (hist_field->flags & HIST_FIELD_HEX)
+ return "hex";
+ if (hist_field->flags & HIST_FIELD_SYSCALL)
+ return "syscall";
+ if (hist_field->flags & HIST_FIELD_EXECNAME)
+ return "execname";
+
+ return NULL;
+}
+
+/* Print a field reference as it appears in a command: name[.modifier] */
+static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
+{
+ const char *flags_str = NULL;
+
+ if (hist_field->flags)
+ flags_str = get_hist_field_flags(hist_field);
+
+ seq_printf(m, "%s", hist_field->field->name);
+ if (flags_str)
+ seq_printf(m, ".%s", flags_str);
+}
+
+/*
+ * Reconstruct the trigger's command string for display, e.g.
+ * "hist:keys=...:vals=...:sort=...:size=... [if ...] [active]".
+ * Shown in the event's 'trigger' file and atop its 'hist' file.
+ */
+static int event_hist_trigger_print(struct seq_file *m,
+ struct event_trigger_ops *ops,
+ struct event_trigger_data *data)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ unsigned int i;
+
+ seq_puts(m, "hist:keys=");
+
+ if (hist_data->key->flags & HIST_FIELD_STACKTRACE)
+ seq_puts(m, "stacktrace");
+ else
+ hist_field_print(m, hist_data->key);
+
+ seq_puts(m, ":vals=");
+
+ for (i = 0; i < hist_data->n_vals; i++) {
+ if (i > 0)
+ seq_puts(m, ",");
+ hist_field_print(m, hist_data->vals[i]);
+ }
+
+ /* sort keys are optional; print the ":sort=" prefix only once */
+ for (i = 0; i < HIST_SORT_KEYS_MAX; i++) {
+ if (!hist_data->sort_keys[i])
+ break;
+
+ if (i == 0)
+ seq_puts(m, ":sort=");
+ else
+ seq_puts(m, ",");
+
+ if (hist_data->sort_keys[i]->use_hitcount)
+ seq_puts(m, "hitcount");
+ else {
+ unsigned int idx = hist_data->sort_keys[i]->idx;
+
+ hist_field_print(m, hist_data->vals[idx]);
+ }
+ }
+
+ seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
+
+ if (data->filter_str)
+ seq_printf(m, " if %s", data->filter_str);
+
+ if (data->paused)
+ seq_puts(m, " [paused]");
+ else
+ seq_puts(m, " [active]");
+
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+/*
+ * Drop a reference to the trigger; on the last reference free the
+ * generic trigger data and then the hist-specific data (hist_data is
+ * saved up front because trigger_data_free() frees @data itself).
+ */
+static void event_hist_trigger_free(struct event_trigger_ops *ops,
+ struct event_trigger_data *data)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+
+ if (WARN_ON_ONCE(data->ref <= 0))
+ return;
+
+ data->ref--;
+ if (!data->ref) {
+ trigger_data_free(data);
+ destroy_hist_data(hist_data);
+ }
+}
+
+/* trigger ops shared by all hist triggers */
+static struct event_trigger_ops event_hist_trigger_ops = {
+ .func = event_hist_trigger,
+ .print = event_hist_trigger_print,
+ .init = event_trigger_init,
+ .free = event_hist_trigger_free,
+};
+
+/* 'hist' uses a single set of trigger ops regardless of cmd/param */
+static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
+ char *param)
+{
+ return &event_hist_trigger_ops;
+}
+
+/*
+ * Reset a hist trigger's accumulated state.
+ *
+ * The trigger is paused and synchronize_sched() waits for any
+ * in-flight event_hist_trigger() calls to finish before the map and
+ * counters are cleared; the prior paused state is then restored.
+ */
+static void hist_clear(struct event_trigger_data *data)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ bool paused;
+
+ paused = data->paused;
+ data->paused = true;
+
+ synchronize_sched();
+
+ tracing_map_clear(hist_data->map);
+
+ atomic64_set(&hist_data->total_hits, 0);
+ atomic64_set(&hist_data->drops, 0);
+
+ data->paused = paused;
+}
+
+/*
+ * event_command.reg implementation for 'hist'.
+ *
+ * A second hist command on an event may only clear, pause, or
+ * continue the existing trigger; anything else is -EEXIST.  Returns
+ * the number of triggers registered (0 or 1) or a negative errno.
+ */
+static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *data,
+ struct ftrace_event_file *file)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ struct event_trigger_data *test;
+ int ret = 0;
+
+ list_for_each_entry_rcu(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ /* modify the existing trigger instead of adding one */
+ if (hist_data->attrs->clear)
+ hist_clear(test);
+ else if (hist_data->attrs->pause)
+ test->paused = true;
+ else if (hist_data->attrs->cont)
+ test->paused = false;
+ else
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ if (hist_data->attrs->pause)
+ data->paused = true;
+
+ if (data->ops->init) {
+ ret = data->ops->init(data->ops, data);
+ if (ret < 0)
+ goto out;
+ }
+
+ list_add_rcu(&data->list, &file->triggers);
+ ret++;
+
+ /* back out (ret drops to 0) if the event can't be enabled */
+ if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ list_del_rcu(&data->list);
+ ret--;
+ }
+ update_cond_flag(file);
+ out:
+ return ret;
+}
+
+/*
+ * event_command.func for 'hist': parse a command written to the
+ * event's trigger file, build the hist data, and register (or, with a
+ * leading '!', unregister) the trigger.  Command format:
+ *
+ *   hist:keys=...:vals=...[:sort=...][:size=...] [if filter]
+ */
+static int event_hist_trigger_func(struct event_command *cmd_ops,
+ struct ftrace_event_file *file,
+ char *glob, char *cmd, char *param)
+{
+ unsigned int hist_trigger_bits = HIST_TRIGGER_BITS;
+ struct event_trigger_data *trigger_data;
+ struct hist_trigger_attrs *attrs;
+ struct event_trigger_ops *trigger_ops;
+ struct hist_trigger_data *hist_data;
+ char *trigger;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ /* separate the trigger from the filter (k:v [if filter]) */
+ trigger = strsep(&param, " \t");
+ if (!trigger)
+ return -EINVAL;
+
+ attrs = parse_hist_trigger_attrs(trigger);
+ if (IS_ERR(attrs))
+ return PTR_ERR(attrs);
+
+ if (!attrs->keys_str || !attrs->vals_str) {
+ /* don't leak the parsed attrs on an incomplete command */
+ destroy_hist_trigger_attrs(attrs);
+ return -EINVAL;
+ }
+
+ if (attrs->map_bits)
+ hist_trigger_bits = attrs->map_bits;
+
+ /* create_hist_data() takes ownership of attrs -- TODO confirm */
+ hist_data = create_hist_data(hist_trigger_bits, attrs, file);
+ if (IS_ERR(hist_data))
+ return PTR_ERR(hist_data);
+
+ trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
+
+ ret = -ENOMEM;
+ trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+ if (!trigger_data) {
+ /*
+ * Can't use out_free here: set_filter() would deref the
+ * NULL trigger_data.  Free hist_data directly instead of
+ * leaking it as 'goto out' alone would.
+ */
+ destroy_hist_data(hist_data);
+ goto out;
+ }
+
+ trigger_data->count = -1;
+ trigger_data->ops = trigger_ops;
+ trigger_data->cmd_ops = cmd_ops;
+
+ INIT_LIST_HEAD(&trigger_data->list);
+ RCU_INIT_POINTER(trigger_data->filter, NULL);
+
+ trigger_data->private_data = hist_data;
+
+ if (glob[0] == '!') {
+ /* '!hist:...' removes an existing trigger */
+ cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ ret = 0;
+ goto out_free;
+ }
+
+ if (!param) /* if param is non-empty, it's supposed to be a filter */
+ goto out_reg;
+
+ if (!cmd_ops->set_filter)
+ goto out_reg;
+
+ ret = cmd_ops->set_filter(param, trigger_data, file);
+ if (ret < 0)
+ goto out_free;
+ out_reg:
+ ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
+ /*
+ * The above returns on success the # of triggers registered,
+ * but if it didn't register any it returns zero. Consider no
+ * triggers registered a failure too.
+ */
+ if (!ret) {
+ if (!(attrs->pause || attrs->cont || attrs->clear))
+ ret = -ENOENT;
+ goto out_free;
+ } else if (ret < 0)
+ goto out_free;
+ /* Just return zero, not the number of registered triggers */
+ ret = 0;
+ out:
+ return ret;
+ out_free:
+ if (cmd_ops->set_filter)
+ cmd_ops->set_filter(NULL, trigger_data, NULL);
+ kfree(trigger_data);
+ destroy_hist_data(hist_data);
+
+ goto out;
+}
+
+/*
+ * The 'hist' event trigger command.  post_trigger makes the trigger
+ * fire after the event record exists, so @rec is always non-NULL.
+ */
+static struct event_command trigger_hist_cmd = {
+ .name = "hist",
+ .trigger_type = ETT_EVENT_HIST,
+ .post_trigger = true, /* need non-NULL rec */
+ .func = event_hist_trigger_func,
+ .reg = hist_register_trigger,
+ .unreg = unregister_trigger,
+ .get_trigger_ops = event_hist_get_trigger_ops,
+ .set_filter = set_trigger_filter,
+};
+
+/* Register the 'hist' trigger command at boot; failure is unexpected. */
+__init int register_trigger_hist_cmd(void)
+{
+ int ret = register_event_command(&trigger_hist_cmd);
+
+ WARN_ON(ret < 0);
+
+ return ret;
+}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 010ce30..a09eba3 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -28,8 +28,7 @@
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

-static void
-trigger_data_free(struct event_trigger_data *data)
+void trigger_data_free(struct event_trigger_data *data)
{
if (data->cmd_ops->set_filter)
data->cmd_ops->set_filter(NULL, data, NULL);
@@ -316,7 +315,7 @@ const struct file_operations event_trigger_fops = {
* Currently we only register event commands from __init, so mark this
* __init too.
*/
-static __init int register_event_command(struct event_command *cmd)
+__init int register_event_command(struct event_command *cmd)
{
struct event_command *p;
int ret = 0;
@@ -405,9 +404,8 @@ event_trigger_print(const char *name, struct seq_file *m,
*
* Return: 0 on success, errno otherwise
*/
-static int
-event_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+int event_trigger_init(struct event_trigger_ops *ops,
+ struct event_trigger_data *data)
{
data->ref++;
return 0;
@@ -435,8 +433,8 @@ event_trigger_free(struct event_trigger_ops *ops,
trigger_data_free(data);
}

-static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
- int trigger_enable)
+int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
+ int trigger_enable)
{
int ret = 0;

@@ -493,7 +491,7 @@ clear_event_triggers(struct trace_array *tr)
* its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
* cleared.
*/
-static void update_cond_flag(struct ftrace_event_file *file)
+void update_cond_flag(struct ftrace_event_file *file)
{
struct event_trigger_data *data;
bool set_cond = false;
@@ -569,9 +567,9 @@ out:
* Usually used directly as the @unreg method in event command
* implementations.
*/
-static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
- struct event_trigger_data *test,
- struct ftrace_event_file *file)
+void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *test,
+ struct ftrace_event_file *file)
{
struct event_trigger_data *data;
bool unregistered = false;
@@ -705,9 +703,9 @@ event_trigger_callback(struct event_command *cmd_ops,
*
* Return: 0 on success, errno otherwise
*/
-static int set_trigger_filter(char *filter_str,
- struct event_trigger_data *trigger_data,
- struct ftrace_event_file *file)
+int set_trigger_filter(char *filter_str,
+ struct event_trigger_data *trigger_data,
+ struct ftrace_event_file *file)
{
struct event_trigger_data *data = trigger_data;
struct event_filter *filter = NULL, *tmp;
@@ -1437,6 +1435,7 @@ __init int register_trigger_cmds(void)
register_trigger_snapshot_cmd();
register_trigger_stacktrace_cmd();
register_trigger_enable_disable_cmds();
+ register_trigger_hist_cmd();

return 0;
}
--
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/