Re: [PATCH v5 2/3] tracing: Add trace_total_entries() / trace_total_entries_cpu()

From: Daniel Thompson
Date: Tue Mar 19 2019 - 07:25:44 EST


On Mon, Mar 18, 2019 at 01:47:40PM -0700, Douglas Anderson wrote:
> These two new exported functions will be used in a future patch by
> kdb_ftdump() to quickly skip all but the last few trace entries.
>
> Suggested-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
> Signed-off-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
> ---
>
> Changes in v5: None
> Changes in v4:
> - trace_total_entries() / trace_total_entries_cpu() new for v4
>
> Changes in v3: None
>
> kernel/trace/trace.c | 65 ++++++++++++++++++++++++++++++++++----------
> kernel/trace/trace.h | 3 ++
> 2 files changed, 53 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index ccd759eaad79..7afc90f82e53 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -3490,34 +3490,69 @@ static void s_stop(struct seq_file *m, void *p)
>         trace_event_read_unlock();
> }
>
> +static void
> +get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
> +                      unsigned long *entries, int cpu)
> +{
> +        unsigned long count;
> +
> +        count = ring_buffer_entries_cpu(buf->buffer, cpu);
> +        /*
> +         * If this buffer has skipped entries, then we hold all
> +         * entries for the trace and we need to ignore the
> +         * ones before the time stamp.
> +         */
> +        if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> +                count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> +                /* total is the same as the entries */
> +                *total = count;
> +        } else
> +                *total = count +
> +                        ring_buffer_overrun_cpu(buf->buffer, cpu);
> +        *entries = count;
> +}
> +
> static void
> get_total_entries(struct trace_buffer *buf,
>                   unsigned long *total, unsigned long *entries)
> {
> -        unsigned long count;
> +        unsigned long t, e;
>         int cpu;
>
>         *total = 0;
>         *entries = 0;
>
>         for_each_tracing_cpu(cpu) {
> -                count = ring_buffer_entries_cpu(buf->buffer, cpu);
> -                /*
> -                 * If this buffer has skipped entries, then we hold all
> -                 * entries for the trace and we need to ignore the
> -                 * ones before the time stamp.
> -                 */
> -                if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> -                        count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> -                        /* total is the same as the entries */
> -                        *total += count;
> -                } else
> -                        *total += count +
> -                                ring_buffer_overrun_cpu(buf->buffer, cpu);
> -                *entries += count;
> +                get_total_entries_cpu(buf, &t, &e, cpu);
> +                *total += t;
> +                *entries += e;
>         }
> }
>
> +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
> +{
> +        unsigned long total, entries;
> +
> +        if (!tr)
> +                tr = &global_trace;

This function is only ever called with tr set to NULL, which means tr is
an argument looking for a user.

I wouldn't mind if this were copying prior art to keep the API feel the
same, but I can't find any other trace function where NULL can be
substituted for the trace_array. AFAICT all the existing sites that use
global_trace do so unconditionally.
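
To make the concern concrete: if NULL is the only thing that will ever
be passed, I'd half expect the argument to go away and the helper to act
on global_trace directly. A rough, untested sketch of that shape (purely
to illustrate the point, not a demand for this exact code):

unsigned long trace_total_entries_cpu(int cpu)
{
        unsigned long total, entries;

        /* no tr argument: global_trace is the only buffer anyone asks about */
        get_total_entries_cpu(&global_trace.trace_buffer, &total, &entries, cpu);

        return entries;
}

Keeping the argument is fine too if there's a real trace_array user on
the horizon; it just isn't visible from this series.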


> +
> +        get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
> +
> +        return entries;
> +}
> +
> +unsigned long trace_total_entries(struct trace_array *tr)
> +{
> +        unsigned long total, entries;
> +
> +        if (!tr)
> +                tr = &global_trace;

Ditto.
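
For what it's worth, my reading of the commit message is that the
eventual kdb_ftdump() user reduces to something like the sketch below
(the function and parameter names are invented by me, and I haven't
cross-checked against patch 3/3), which is also why tr looks redundant
from where I'm sitting:

/*
 * How many entries must be skipped so that only the last
 * 'num_to_dump' entries get printed?  Treating cpu < 0 as
 * "all CPUs" is an invention of this sketch.
 */
static unsigned long kdb_dump_skip_count(int cpu, unsigned long num_to_dump)
{
        unsigned long total;

        if (cpu < 0)
                total = trace_total_entries(NULL);
        else
                total = trace_total_entries_cpu(NULL, cpu);

        /* skip everything except the last num_to_dump entries */
        return (total > num_to_dump) ? total - num_to_dump : 0;
}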


Daniel.


> +
> +        get_total_entries(&tr->trace_buffer, &total, &entries);
> +
> +        return entries;
> +}
> +
> static void print_lat_help_header(struct seq_file *m)
> {
> seq_puts(m, "# _------=> CPU# \n"
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index d80cee49e0eb..4562b30eda9d 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -719,6 +719,9 @@ void trace_init_global_iter(struct trace_iterator *iter);
>
> void tracing_iter_reset(struct trace_iterator *iter, int cpu);
>
> +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
> +unsigned long trace_total_entries(struct trace_array *tr);
> +
> void trace_function(struct trace_array *tr,
>                     unsigned long ip,
>                     unsigned long parent_ip,
> --
> 2.21.0.225.g810b269d1ac-goog
>