[PATCH] pstore/ftrace: Convert to its own enable/disable debugfs knob
From: Anton Vorontsov
Date: Tue Jul 17 2012 - 23:49:25 EST
With this patch we no longer reuse the function tracer infrastructure;
instead, we register our own tracer back-end via a debugfs knob.
It's a bit more code, but that is the only downside. On the bright side we
have:
- Ability to make the persistent_ram module removable (when needed, we can
move the ftrace_ops struct into a module). Note that persistent_ram is still
not removable for other reasons, but with this patch it's one less
thing to worry about;
- The pstore part is more isolated from the generic function tracer. We
already tried registering our own tracer in available_tracers, but that
way we lose the ability to see the traces while we record them to
pstore. This solution is somewhere in the middle: we only register an
"internal ftracer" back-end, but not the "front-end";
- When only pstore tracing is enabled, the kernel writes only to the
pstore buffer, omitting the function tracer buffer (which, of course,
can still be enabled via 'echo function > current_tracer'). A usage
sketch follows below.
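For reviewers who want to try it, the flow with the new knob looks roughly
like this (taken from the Documentation/ramoops.txt hunk below; the last
step is illustrative -- "ftrace-ramoops" is the file name described in that
document, the exact mount point may differ):

  # mount -t debugfs debugfs /sys/kernel/debug/
  # echo 1 > /sys/kernel/debug/pstore/record_ftrace
  # reboot -f
  [...]
  # mount -t pstore pstore /mnt/
  # cat /mnt/ftrace-ramoops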
Suggested-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
Signed-off-by: Anton Vorontsov <anton.vorontsov@xxxxxxxxxx>
---
On Tue, Jul 17, 2012 at 05:38:22PM -0400, Steven Rostedt wrote:
[...]
> > > Note, it's safe to use even if the trace_ops (or anything the callback calls)
> > > is in a module. That's because it detects the trace_ops is not kernel core
> > > code and will place a wrapper around it that allows the function tracing
> > > to be synced with module unload. You still need to unregister the
> > > trace_ops before unloading the module, or you can have a crash that way.
> >
> > Hehe. Like this? http://lkml.org/lkml/2012/5/26/80 :-D
> >
> > So, do you want something like this, but combined: we don't register
> > another tracer, but register our own ftrace_ops? This sounds doable.
>
> Yeah, the combined. That is, don't make a tracer out of it. Use another
> mechanism to enable the tracing, rather than as a tracer
> in /debug/tracing/available_tracers. We can probably set up events for
> you too, but at a later time.
OK, here it is. How does it look?
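One note on the module-removal point in the changelog above: if the
ftrace_ops ever does move into a module, the rule Steven mentions
(unregister the ops before the module text goes away) would look roughly
like the hypothetical sketch below. pstore_ftrace_init_debugfs() is a
made-up helper standing in for the debugfs setup; the ops, mutex and
enabled flag are the ones added by this patch:

	#include <linux/module.h>

	static int __init pstore_ftrace_mod_init(void)
	{
		/* Nothing is registered yet; the debugfs knob does that on demand. */
		return pstore_ftrace_init_debugfs();	/* hypothetical helper */
	}

	static void __exit pstore_ftrace_mod_exit(void)
	{
		/*
		 * Unregister before the module text is freed, otherwise the
		 * function tracer could still call into vanished code.
		 * (debugfs cleanup omitted for brevity)
		 */
		mutex_lock(&pstore_ftrace_lock);
		if (pstore_ftrace_enabled)
			unregister_ftrace_function(&pstore_ftrace_ops);
		pstore_ftrace_enabled = false;
		mutex_unlock(&pstore_ftrace_lock);
	}

	module_init(pstore_ftrace_mod_init);
	module_exit(pstore_ftrace_mod_exit);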
Documentation/ramoops.txt | 4 +-
fs/pstore/Kconfig | 1 +
fs/pstore/ftrace.c | 94 +++++++++++++++++++++++++++++++++++++++-
fs/pstore/internal.h | 6 +++
fs/pstore/platform.c | 1 +
include/linux/pstore.h | 8 ----
kernel/trace/trace_functions.c | 15 +------
7 files changed, 103 insertions(+), 26 deletions(-)
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
index 197ad59..69b3cac 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/ramoops.txt
@@ -102,9 +102,7 @@ related hangs. The functions call chain log is stored in a "ftrace-ramoops"
file. Here is an example of usage:
# mount -t debugfs debugfs /sys/kernel/debug/
- # cd /sys/kernel/debug/tracing
- # echo function > current_tracer
- # echo 1 > options/func_pstore
+ # echo 1 > /sys/kernel/debug/pstore/record_ftrace
# reboot -f
[...]
# mount -t pstore pstore /mnt/
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index d39bb5c..ca71db6 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -23,6 +23,7 @@ config PSTORE_FTRACE
bool "Persistent function tracer"
depends on PSTORE
depends on FUNCTION_TRACER
+ depends on DEBUG_FS
help
With this option kernel traces function calls into a persistent
ram buffer that can be decoded and dumped after reboot through
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index a130d48..824be81 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -17,19 +17,111 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
#include <asm/barrier.h>
#include "internal.h"
-void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
+static void notrace pstore_ftrace_call(unsigned long ip,
+ unsigned long parent_ip)
{
+ unsigned long flags;
struct pstore_ftrace_record rec = {};
+ if (unlikely(!ftrace_enabled))
+ return;
if (unlikely(oops_in_progress))
return;
+ local_irq_save(flags);
+
rec.ip = ip;
rec.parent_ip = parent_ip;
pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
sizeof(rec), psinfo);
+
+ local_irq_restore(flags);
+}
+
+static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
+ .func = pstore_ftrace_call,
+};
+
+static DEFINE_MUTEX(pstore_ftrace_lock);
+static bool pstore_ftrace_enabled;
+
+static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 on;
+ int err;
+
+ err = kstrtou8_from_user(buf, count, 2, &on);
+ if (err)
+ return err;
+
+ mutex_lock(&pstore_ftrace_lock);
+
+ if (!on ^ pstore_ftrace_enabled)
+ goto out;
+ pstore_ftrace_enabled = on;
+
+ if (on)
+ register_ftrace_function(&pstore_ftrace_ops);
+ else
+ unregister_ftrace_function(&pstore_ftrace_ops);
+out:
+ mutex_unlock(&pstore_ftrace_lock);
+
+ return count;
+}
+
+static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char val[] = { '0' + pstore_ftrace_enabled, '\n' };
+
+ return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
+}
+
+static const struct file_operations pstore_knob_fops = {
+ .open = simple_open,
+ .read = pstore_ftrace_knob_read,
+ .write = pstore_ftrace_knob_write,
+};
+
+void pstore_register_ftrace(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+ int err;
+
+ if (!psinfo->write_buf)
+ return;
+
+ dir = debugfs_create_dir("pstore", NULL);
+ if (IS_ERR_OR_NULL(dir)) {
+ err = PTR_ERR(dir);
+ pr_err("%s: unable to create pstore debugfs: %d\n",
+ __func__, err);
+ return;
+ }
+
+ file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
+ &pstore_knob_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ err = PTR_ERR(file);
+ pr_err("%s: unable to create pstore/ftrace file: %d\n",
+ __func__, err);
+ goto err_file;
+ }
+
+ return;
+err_file:
+ debugfs_remove(dir);
}
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 0d0d3b7..4847f58 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -39,6 +39,12 @@ pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
#endif
}
+#ifdef CONFIG_PSTORE_FTRACE
+extern void pstore_register_ftrace(void);
+#else
+static inline void pstore_register_ftrace(void) {}
+#endif
+
extern struct pstore_info *psinfo;
extern void pstore_set_kmsg_bytes(int);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 29996e8..6c23eab 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -236,6 +236,7 @@ int pstore_register(struct pstore_info *psi)
kmsg_dump_register(&pstore_dumper);
pstore_register_console();
+ pstore_register_ftrace();
if (pstore_update_ms >= 0) {
pstore_timer.expires = jiffies +
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index c892587..ee3034a 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -64,14 +64,6 @@ struct pstore_info {
void *data;
};
-
-#ifdef CONFIG_PSTORE_FTRACE
-extern void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip);
-#else
-static inline void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
-{ }
-#endif
-
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
#else
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f41..0ad83e3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
-#include <linux/pstore.h>
#include <linux/fs.h>
#include "trace.h"
@@ -75,10 +74,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
preempt_enable_notrace();
}
-/* Our two options */
+/* Our option */
enum {
TRACE_FUNC_OPT_STACK = 0x1,
- TRACE_FUNC_OPT_PSTORE = 0x2,
};
static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- /*
- * So far tracing doesn't support multiple buffers, so
- * we make an explicit call for now.
- */
- if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
- pstore_ftrace_call(ip, parent_ip);
pc = preempt_count();
trace_function(tr, ip, parent_ip, flags, pc);
}
@@ -177,9 +169,6 @@ static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
-#ifdef CONFIG_PSTORE_FTRACE
- { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
{ } /* Always set a last empty entry */
};
@@ -232,8 +221,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
}
break;
- case TRACE_FUNC_OPT_PSTORE:
- break;
default:
return -EINVAL;
}
--
1.7.10.4