Re: [PATCH v3] perf/core: Fix reentry problem in perf_output_read_group
From: Peter Zijlstra
Date: Tue Aug 16 2022 - 11:31:58 EST
On Tue, Aug 16, 2022 at 03:54:19PM +0100, Mark Rutland wrote:
> > diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> > index ee8b9ecdc03b..d4d53b9ba71e 100644
> > --- a/include/linux/perf_event.h
> > +++ b/include/linux/perf_event.h
> > @@ -631,7 +631,12 @@ struct pmu_event_list {
> > struct list_head list;
> > };
> >
> > +/*
> > + * Iterating the sibling list requires this list to be stable; by ensuring IRQs
> > + * are disabled IPIs from perf_{install_in,remove_from}_context() are held off.
> > + */
> > #define for_each_sibling_event(sibling, event) \
> > + lockdep_assert_irqs_disabled(); \
> > if ((event)->group_leader == (event)) \
> > list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
> >
>
> I had a go with v6.0-rc1 and Vince's perf fuzzer immediately triggered a bunch
> of cases (dump below).
>
> I had thought holding the context mutex protected some of these cases, even
> with IRQs unmasked?
Ah yes.. duh. How's this then?
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ee8b9ecdc03b..4d9cf508c510 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -631,7 +631,21 @@ struct pmu_event_list {
struct list_head list;
};

+#ifdef CONFIG_LOCKDEP
+#define LOCKDEP_ASSERT_EVENT_CTX(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define LOCKDEP_ASSERT_EVENT_CTX(event)
+#endif
+
+/*
+ * Iterating the sibling list requires this list to be stable; hold ctx->mutex
+ * or disable IRQs (holding off perf_{install_in,remove_from}_context() IPIs).
+ */
#define for_each_sibling_event(sibling, event) \
+ LOCKDEP_ASSERT_EVENT_CTX(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
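
For the record, a rough userspace model of what the new assertion accepts and
rejects; the two flags below are stand-ins for the kernel's lockdep state
(this_cpu_read(hardirqs_enabled) and lockdep_is_held(&event->ctx->mutex)), not
real APIs. Iterating the sibling list is treated as safe when IRQs are disabled
on the local CPU or when the event's ctx->mutex is held; the WARN fires only
when neither protection is in place:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for lockdep/IRQ state; not the kernel's accessors. */
static bool hardirqs_enabled;	/* models this_cpu_read(hardirqs_enabled) */
static bool ctx_mutex_held;	/* models lockdep_is_held(&event->ctx->mutex) */

/*
 * Models LOCKDEP_ASSERT_EVENT_CTX(): warn only when IRQs are enabled
 * and the context mutex is not held, i.e. when neither protection
 * against perf_{install_in,remove_from}_context() is in place.
 */
static void assert_event_ctx(void)
{
	if (hardirqs_enabled && !ctx_mutex_held)
		fprintf(stderr, "WARN: sibling list iterated unprotected\n");
}

int main(void)
{
	hardirqs_enabled = false; ctx_mutex_held = false;
	assert_event_ctx();	/* OK: IRQs disabled (e.g. the output path) */

	hardirqs_enabled = true; ctx_mutex_held = true;
	assert_event_ctx();	/* OK: ctx->mutex held (e.g. a syscall path) */

	hardirqs_enabled = true; ctx_mutex_held = false;
	assert_event_ctx();	/* warns: the sibling list can change under us */

	return 0;
}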