Re: [RFC 0/3] Revert SRCU from tracepoint infrastructure
From: Mathieu Desnoyers
Date: Mon Feb 10 2020 - 14:05:23 EST
----- On Feb 10, 2020, at 1:30 PM, rostedt <rostedt@xxxxxxxxxxx> wrote:
> On Mon, 10 Feb 2020 12:33:04 -0500 (EST)
> Mathieu Desnoyers <mathieu.desnoyers@xxxxxxxxxxxx> wrote:
>
>> The rcu_irq_enter/exit_irqson() does atomic_add_return(), which is even worse
>> than a memory barrier.
>
> As we discussed on IRC, would something like this work? (Not even
> compile tested.)
Yes, it's very close to what I have prototyped locally, with one very minor
detail below:
>
> -- Steve
>
> diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
> index 1fb11daa5c53..a83fd076a312 100644
> --- a/include/linux/tracepoint.h
> +++ b/include/linux/tracepoint.h
> @@ -179,10 +179,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
> * For rcuidle callers, use srcu since sched-rcu \
> * doesn't work from the idle path. \
> */ \
> - if (rcuidle) { \
> + if (rcuidle) \
> __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
> - rcu_irq_enter_irqson(); \
> - } \
> \
> it_func_ptr = rcu_dereference_raw((tp)->funcs); \
> \
> @@ -194,10 +192,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
> } while ((++it_func_ptr)->func); \
> } \
> \
> - if (rcuidle) { \
> - rcu_irq_exit_irqson(); \
> + if (rcuidle) \
> srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
> - } \
> \
> preempt_enable_notrace(); \
> } while (0)
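
In other words, with the hunk above the rcuidle path of __DO_TRACE reduces
to roughly this (a simplified, macro-expanded sketch; the static-key check
and the per-callback data pointer are elided):

	preempt_disable_notrace();
	if (rcuidle)
		__idx = srcu_read_lock_notrace(&tracepoint_srcu);
	it_func_ptr = rcu_dereference_raw((tp)->funcs);
	if (it_func_ptr) {
		do {
			/* Invoke each registered probe in turn. */
			((void (*)(proto))(it_func_ptr->func))(args);
		} while ((++it_func_ptr)->func);
	}
	if (rcuidle)
		srcu_read_unlock_notrace(&tracepoint_srcu, __idx);
	preempt_enable_notrace();

The idle path thus relies on SRCU alone; the rcu_irq_enter/exit_irqson()
cost moves into the perf probe below, and only for the !rcu_is_watching()
case.
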
> diff --git a/include/trace/perf.h b/include/trace/perf.h
> index dbc6c74defc3..86d3b2eb00cd 100644
> --- a/include/trace/perf.h
> +++ b/include/trace/perf.h
> @@ -39,17 +39,27 @@ perf_trace_##call(void *__data, proto) \
> u64 __count = 1; \
> struct task_struct *__task = NULL; \
> struct hlist_head *head; \
> + bool rcu_watching; \
> int __entry_size; \
> int __data_size; \
> int rctx; \
> \
> + rcu_watching = rcu_is_watching(); \
> + \
> +	/* Cannot use RCU if RCU is not watching and in NMI */ \
> + if (!rcu_watching && in_nmi()) \
> + return; \
> + \
> __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
> \
> + if (!rcu_watching) \
> + rcu_irq_enter_irqson(); \
You might want to fold the line above into the first check, like this. Doing
the rcu_irq_enter_irqson() earlier should not matter, and I expect the fold
to remove a branch from the probe:
	rcu_watching = rcu_is_watching();
	if (!rcu_watching) {
		if (in_nmi())
			return;
		rcu_irq_enter_irqson();
	}
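
For completeness, the probe prologue and epilogue would then read as below
(an untested sketch; everything else in the hunks stays as-is):

	rcu_watching = rcu_is_watching();
	if (!rcu_watching) {
		/* Cannot use RCU from NMI when RCU is not watching. */
		if (in_nmi())
			return;
		rcu_irq_enter_irqson();
	}

	/* ... probe body, with early returns turned into "goto out" ... */

out:
	if (!rcu_watching)
		rcu_irq_exit_irqson();
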
Thanks!
Mathieu
> + \
> head = this_cpu_ptr(event_call->perf_events); \
> if (!bpf_prog_array_valid(event_call) && \
> __builtin_constant_p(!__task) && !__task && \
> hlist_empty(head)) \
> - return; \
> + goto out; \
> \
> __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
> sizeof(u64)); \
> @@ -57,7 +67,7 @@ perf_trace_##call(void *__data, proto) \
> \
> entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
> if (!entry) \
> - return; \
> + goto out; \
> \
> perf_fetch_caller_regs(__regs); \
> \
> @@ -68,6 +78,9 @@ perf_trace_##call(void *__data, proto) \
> perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
> event_call, __count, __regs, \
> head, __task); \
> +out: \
> + if (!rcu_watching) \
> + rcu_irq_exit_irqson(); \
> }
>
> /*
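
Regarding the atomic_add_return() remark quoted at the top: at this point in
time, rcu_irq_enter_irqson() bottoms out in rcu_dynticks_eqs_exit(), which
does roughly the following (simplified from kernel/rcu/tree.c of this era;
exact field and constant names vary between kernel versions):

	/*
	 * Leaving an RCU extended quiescent state: a full memory barrier
	 * plus an atomic read-modify-write on the dynticks counter, hence
	 * "even worse than a memory barrier".
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);

By contrast, srcu_read_lock_notrace() on the rcuidle path is roughly a
per-CPU counter increment followed by smp_mb().
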
--
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com