[RFC][PATCH] tracing/jump-labels: Add message when a trace event has no instances

From: Steven Rostedt
Date: Tue Oct 10 2017 - 18:56:38 EST


From: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>

The TRACE_EVENT() macro creates a few data structures and several
helper functions, and that takes up some memory. It dawned on me that
if a trace event is created but never used, it's like a header file
that is included for no reason: everything just works and there's
nothing to tell us otherwise. But unlike an unneeded header file, a
TRACE_EVENT() without any users wastes kernel memory. As trace events
get added and removed over time, unused ones are left behind, and we
need a way to detect them.

As trace events use jump labels to enable them, it is possible to
tweak the jump label code to return the number of instances there are
for a specific key. The tracepoint code creates the key, and each use
case (call site) creates a jump label instance. If a tracepoint has no
jump label instances, then we know it is not being used.
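
To illustrate the relationship (this sketch is not part of the patch,
and the names in it are made up for illustration, not the kernel's
own): the jump table is essentially a list of entries, each tied to
the key of the tracepoint whose trace_<name>() call site produced it,
so counting the entries attached to a key tells us whether the event
has any users. A minimal stand-alone model of that idea:

/*
 * Stand-alone model in plain userspace C: a key with zero jump
 * entries attached to it has no call sites, i.e. no users.
 */
#include <stdio.h>

struct static_key {
        const char *owner;      /* name of the tracepoint owning this key */
};

struct jump_entry {
        unsigned long code;     /* address of the patched call site (0 = invalid) */
        struct static_key *key; /* key this call site belongs to */
};

static struct static_key used_key   = { "trace_sched_switch" };
static struct static_key unused_key = { "trace_never_called" };

/* The table the build would emit: one entry per call site. */
static struct jump_entry jump_table[] = {
        { 0x1000, &used_key },
        { 0x2000, &used_key },
        /* nothing references unused_key */
};

/* Count the entries attached to @key -- the "count only" walk. */
static int static_key_instances(struct static_key *key)
{
        size_t i;
        int cnt = 0;

        for (i = 0; i < sizeof(jump_table) / sizeof(jump_table[0]); i++)
                if (jump_table[i].key == key && jump_table[i].code)
                        cnt++;
        return cnt;
}

int main(void)
{
        struct static_key *keys[] = { &used_key, &unused_key };
        size_t i;

        for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
                if (static_key_instances(keys[i]) == 0)
                        printf("%s has no instances\n", keys[i]->owner);

        return 0;
}

In the kernel the entries live in the __jump_table section, sorted by
key, and __jump_label_update() already walks exactly the entries that
belong to one key; the count_only flag added below reuses that walk
without patching any text.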

By checking the number of jump label instances a tracepoint key has,
we can detect when none exist. Printing a message when the trace
events are registered shows which ones:

[ 0.000000] trace_rcu_prep_idle has no instances
[ 0.000000] trace_rcu_nocb_wake has no instances
[ 0.000000] trace_ftrace_test_filter has no instances
[ 0.000000] trace_power_domain_target has no instances
[ 0.000000] trace_powernv_throttle has no instances
[ 0.000000] trace_xdp_redirect_map has no instances
[ 0.000000] trace_bpf_map_next_key has no instances
[ 0.000000] trace_bpf_map_delete_elem has no instances
[ 0.000000] trace_bpf_map_update_elem has no instances
[ 0.000000] trace_bpf_map_lookup_elem has no instances
[ 0.000000] trace_bpf_obj_get_map has no instances
[ 0.000000] trace_bpf_obj_pin_map has no instances
[ 0.000000] trace_bpf_obj_get_prog has no instances
[ 0.000000] trace_bpf_obj_pin_prog has no instances
[ 0.000000] trace_bpf_map_create has no instances
[ 0.000000] trace_bpf_prog_load has no instances
[ 0.000000] trace_bpf_prog_put_rcu has no instances
[ 0.000000] trace_bpf_prog_get_type has no instances
[ 0.000000] trace_mm_vmscan_memcg_softlimit_reclaim_end has no instances
[ 0.000000] trace_mm_vmscan_memcg_reclaim_end has no instances
[ 0.000000] trace_mm_vmscan_memcg_softlimit_reclaim_begin has no instances
[ 0.000000] trace_mm_vmscan_memcg_reclaim_begin has no instances
[ 0.000000] trace_ext4_find_delalloc_range has no instances
[ 0.000000] trace_ext4_ext_in_cache has no instances
[ 0.000000] trace_ext4_ext_put_in_cache has no instances
[ 0.000000] trace_mei_pci_cfg_write has no instances
[ 0.000000] trace_dma_fence_emit has no instances
[ 0.000000] trace_dma_fence_annotate_wait_on has no instances
[ 0.000000] trace_thermal_power_devfreq_limit has no instances
[ 0.000000] trace_thermal_power_devfreq_get_power has no instances
[ 0.000000] trace_thermal_power_cpu_limit has no instances
[ 0.000000] trace_thermal_power_cpu_get_power has no instances

This is after I cleaned some of them up. Some trace events I found
had simply had their only user removed. Some never had a user created
in the first place. Others just need an #ifdef around their
TRACE_EVENT() so they don't waste space when their user is not
configured in.
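
As an example of the last case (the event name and config option here
are made up for illustration, and the usual trace header boilerplate
around TRACE_EVENT() is omitted), an event whose only caller lives
behind a Kconfig option can be guarded the same way, so its data
structures and helper functions are only built when a caller actually
exists:

#ifdef CONFIG_FOO_DRIVER
TRACE_EVENT(foo_request_done,

        TP_PROTO(unsigned int id, int status),

        TP_ARGS(id, status),

        TP_STRUCT__entry(
                __field(unsigned int,   id)
                __field(int,            status)
        ),

        TP_fast_assign(
                __entry->id = id;
                __entry->status = status;
        ),

        TP_printk("id=%u status=%d", __entry->id, __entry->status)
);
#endif /* CONFIG_FOO_DRIVER */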

Signed-off-by: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>
---
Index: linux-trace.git/include/linux/jump_label.h
===================================================================
--- linux-trace.git.orig/include/linux/jump_label.h
+++ linux-trace.git/include/linux/jump_label.h
@@ -165,6 +165,7 @@ extern void static_key_enable(struct sta
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern int static_key_instances(struct static_key *key);

/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -256,6 +257,12 @@ static inline void static_key_disable(st
atomic_set(&key->enabled, 0);
}

+/* Without jump labels we don't know how many instances, just assume one */
+static inline int static_key_instances(struct static_key *key)
+{
+ return 1;
+}
+
#define static_key_enable_cpuslocked(k) static_key_enable((k))
#define static_key_disable_cpuslocked(k) static_key_disable((k))

Index: linux-trace.git/kernel/jump_label.c
===================================================================
--- linux-trace.git.orig/kernel/jump_label.c
+++ linux-trace.git/kernel/jump_label.c
@@ -56,7 +56,7 @@ jump_label_sort_entries(struct jump_entr
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

-static void jump_label_update(struct static_key *key);
+static int jump_label_update(struct static_key *key, bool count_only);

/*
* There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
@@ -106,7 +106,7 @@ static void static_key_slow_inc_cpuslock
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
- jump_label_update(key);
+ jump_label_update(key, false);
/*
* Ensure that if the above cmpxchg loop observes our positive
* value, it must also observe all the text changes.
@@ -138,7 +138,7 @@ void static_key_enable_cpuslocked(struct
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
- jump_label_update(key);
+ jump_label_update(key, false);
/*
* See static_key_slow_inc().
*/
@@ -167,7 +167,7 @@ void static_key_disable_cpuslocked(struc

jump_label_lock();
if (atomic_cmpxchg(&key->enabled, 1, 0))
- jump_label_update(key);
+ jump_label_update(key, false);
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
@@ -201,7 +201,7 @@ static void static_key_slow_dec_cpuslock
atomic_inc(&key->enabled);
schedule_delayed_work(work, rate_limit);
} else {
- jump_label_update(key);
+ jump_label_update(key, false);
}
jump_label_unlock();
}
@@ -354,19 +354,26 @@ static enum jump_label_type jump_label_t
return enabled ^ branch;
}

-static void __jump_label_update(struct static_key *key,
+static int __jump_label_update(struct static_key *key,
struct jump_entry *entry,
- struct jump_entry *stop)
+ struct jump_entry *stop,
+ bool count_only)
{
+ int cnt = 0;
+
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
/*
* entry->code set to 0 invalidates module init text sections
* kernel_text_address() verifies we are not in core kernel
* init code, see jump_label_invalidate_module_init().
*/
- if (entry->code && kernel_text_address(entry->code))
- arch_jump_label_transform(entry, jump_label_type(entry));
+ if (entry->code && kernel_text_address(entry->code)) {
+ if (!count_only)
+ arch_jump_label_transform(entry, jump_label_type(entry));
+ cnt++;
+ }
}
+ return cnt;
}

void __init jump_label_init(void)
@@ -470,9 +477,10 @@ static int __jump_label_mod_text_reserve
start, end);
}

-static void __jump_label_mod_update(struct static_key *key)
+static int __jump_label_mod_update(struct static_key *key, bool count_only)
{
struct static_key_mod *mod;
+ int cnt = 0;

for (mod = static_key_mod(key); mod; mod = mod->next) {
struct jump_entry *stop;
@@ -490,8 +498,9 @@ static void __jump_label_mod_update(stru
stop = __stop___jump_table;
else
stop = m->jump_entries + m->num_jump_entries;
- __jump_label_update(key, mod->entries, stop);
+ cnt += __jump_label_update(key, mod->entries, stop, count_only);
}
+ return cnt;
}

/***
@@ -571,7 +580,7 @@ static int jump_label_add_module(struct

/* Only update if we've changed from our initial state */
if (jump_label_type(iter) != jump_label_init_type(iter))
- __jump_label_update(key, iter, iter_stop);
+ __jump_label_update(key, iter, iter_stop, false);
}

return 0;
@@ -711,17 +720,15 @@ int jump_label_text_reserved(void *start
return ret;
}

-static void jump_label_update(struct static_key *key)
+static int jump_label_update(struct static_key *key, bool count_only)
{
struct jump_entry *stop = __stop___jump_table;
struct jump_entry *entry;
#ifdef CONFIG_MODULES
struct module *mod;

- if (static_key_linked(key)) {
- __jump_label_mod_update(key);
- return;
- }
+ if (static_key_linked(key))
+ return __jump_label_mod_update(key, count_only);

preempt_disable();
mod = __module_address((unsigned long)key);
@@ -732,7 +739,13 @@ static void jump_label_update(struct sta
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, stop);
+ return __jump_label_update(key, entry, stop, count_only);
+ return 0;
+}
+
+int static_key_instances(struct static_key *key)
+{
+ return jump_label_update(key, true);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
Index: linux-trace.git/kernel/trace/trace_events.c
===================================================================
--- linux-trace.git.orig/kernel/trace/trace_events.c
+++ linux-trace.git/kernel/trace/trace_events.c
@@ -2093,6 +2093,10 @@ static int event_init(struct trace_event
pr_warn("Could not initialize trace events/%s\n", name);
}

+ if (call->class->reg == trace_event_reg &&
+ static_key_instances(&call->tp->key) == 0)
+ pr_info("trace_%s has no instances\n", name);
+
return ret;
}