[RFC PATCH 29/30 v3] make variable size buffers for traces
From: Steven Rostedt
Date: Tue Jan 15 2008 - 15:53:14 EST
Each tracer can now have the number of entries in its trace buffer
set via kernel command line parameters:
trace_fn_entries - function trace entries (default 65536)
trace_irq_entries - irqs off trace entries (default 512)
trace_ctx_entries - schedule switch trace entries (default 16384)
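For example, booting with the following (hypothetical values, any
count works the same way) doubles the function trace buffer and
quadruples the irqs off buffer:
trace_fn_entries=131072 trace_irq_entries=2048
Note that each buffer is allocated as a power-of-two number of pages,
so the number of entries actually available is rounded up accordingly
and reported in the "actual entries" boot message.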
Signed-off-by: Steven Rostedt <srostedt@xxxxxxxxxx>
---
lib/tracing/trace_function.c | 14 ++++++++++++--
lib/tracing/trace_irqsoff.c | 14 ++++++++++++--
lib/tracing/trace_sched_switch.c | 14 ++++++++++++--
lib/tracing/tracer.h | 1 -
4 files changed, 36 insertions(+), 7 deletions(-)
Index: linux-compile.git/lib/tracing/trace_function.c
===================================================================
--- linux-compile.git.orig/lib/tracing/trace_function.c 2008-01-15 15:13:59.000000000 -0500
+++ linux-compile.git/lib/tracing/trace_function.c 2008-01-15 15:15:00.000000000 -0500
@@ -18,6 +18,16 @@
static struct tracing_trace function_trace __read_mostly;
static DEFINE_PER_CPU(struct tracing_trace_cpu, function_trace_cpu);
static int trace_enabled __read_mostly;
+static unsigned long trace_nr_entries = (65536UL);
+
+static int __init set_nr_entries(char *str)
+{
+ if (!str)
+ return 0;
+ trace_nr_entries = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("trace_fn_entries=", set_nr_entries);
static notrace void function_trace_reset(struct tracing_trace *tr)
{
@@ -132,7 +142,7 @@ static void function_trace_close(struct
__init static int function_trace_alloc_buffers(void)
{
- const int order = page_order(TRACING_NR_ENTRIES * TRACING_ENTRY_SIZE);
+ const int order = page_order(trace_nr_entries * TRACING_ENTRY_SIZE);
const unsigned long size = (1UL << order) << PAGE_SHIFT;
struct tracing_entry *array;
int i;
@@ -156,7 +166,7 @@ __init static int function_trace_alloc_b
function_trace.entries = size / TRACING_ENTRY_SIZE;
pr_info("function tracer: %ld bytes allocated for %ld",
- size, TRACING_NR_ENTRIES);
+ size, trace_nr_entries);
pr_info(" entries of %ld bytes\n", (long)TRACING_ENTRY_SIZE);
pr_info(" actual entries %ld\n", function_trace.entries);
Index: linux-compile.git/lib/tracing/trace_irqsoff.c
===================================================================
--- linux-compile.git.orig/lib/tracing/trace_irqsoff.c 2008-01-15 15:13:59.000000000 -0500
+++ linux-compile.git/lib/tracing/trace_irqsoff.c 2008-01-15 15:15:00.000000000 -0500
@@ -25,6 +25,16 @@ static unsigned long preempt_max_latency
static unsigned long preempt_thresh;
static __cacheline_aligned_in_smp DEFINE_MUTEX(max_mutex);
static int trace_enabled __read_mostly;
+static unsigned long trace_nr_entries = (512UL);
+
+static int __init set_nr_entries(char *str)
+{
+ if (!str)
+ return 0;
+ trace_nr_entries = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("trace_irq_entries=", set_nr_entries);
/*
* max trace is switched with this buffer.
@@ -497,7 +507,7 @@ static void notrace irqsoff_trace_close(
__init static int trace_irqsoff_alloc_buffers(void)
{
- const int order = page_order(TRACING_NR_ENTRIES * TRACING_ENTRY_SIZE);
+ const int order = page_order(trace_nr_entries * TRACING_ENTRY_SIZE);
const unsigned long size = (1UL << order) << PAGE_SHIFT;
struct tracing_entry *array;
int i;
@@ -535,7 +545,7 @@ __init static int trace_irqsoff_alloc_bu
max_tr.stop = irqsoff_stop;
pr_info("irqs off tracer: %ld bytes allocated for %ld",
- size, TRACING_NR_ENTRIES);
+ size, trace_nr_entries);
pr_info(" entries of %d bytes\n", (int)TRACING_ENTRY_SIZE);
pr_info(" actual entries %ld\n", irqsoff_trace.entries);
Index: linux-compile.git/lib/tracing/tracer.h
===================================================================
--- linux-compile.git.orig/lib/tracing/tracer.h 2008-01-15 15:13:59.000000000 -0500
+++ linux-compile.git/lib/tracing/tracer.h 2008-01-15 15:15:00.000000000 -0500
@@ -76,7 +76,6 @@ struct tracing_iterator {
};
#define TRACING_ENTRY_SIZE sizeof(struct tracing_entry)
-#define TRACING_NR_ENTRIES (65536UL)
void notrace tracing_reset(struct tracing_trace_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
Index: linux-compile.git/lib/tracing/trace_sched_switch.c
===================================================================
--- linux-compile.git.orig/lib/tracing/trace_sched_switch.c 2008-01-15 15:13:59.000000000 -0500
+++ linux-compile.git/lib/tracing/trace_sched_switch.c 2008-01-15 15:15:00.000000000 -0500
@@ -18,6 +18,16 @@ static struct tracing_trace sched_switch
static DEFINE_PER_CPU(struct tracing_trace_cpu, sched_switch_trace_cpu);
static int trace_enabled __read_mostly;
+static unsigned long trace_nr_entries = (16384UL);
+
+static int __init set_nr_entries(char *str)
+{
+ if (!str)
+ return 0;
+ trace_nr_entries = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("trace_ctx_entries=", set_nr_entries);
static notrace void sched_switch_callback(const struct marker *mdata,
void *private_data,
@@ -144,7 +154,7 @@ static void sched_switch_trace_close(str
__init static int sched_switch_trace_alloc_buffers(void)
{
- const int order = page_order(TRACING_NR_ENTRIES * TRACING_ENTRY_SIZE);
+ const int order = page_order(trace_nr_entries * TRACING_ENTRY_SIZE);
const unsigned long size = (1UL << order) << PAGE_SHIFT;
struct tracing_entry *array;
int ret;
@@ -170,7 +180,7 @@ __init static int sched_switch_trace_all
sched_switch_trace.entries = size / TRACING_ENTRY_SIZE;
pr_info("sched_switch tracer: %ld bytes allocated for %ld",
- size, TRACING_NR_ENTRIES);
+ size, trace_nr_entries);
pr_info(" entries of %ld bytes\n", (long)TRACING_ENTRY_SIZE);
pr_info(" actual entries %ld\n", sched_switch_trace.entries);
--