[PATCH v4 09/10] perf: optimize event_filter_match during sched_in
From: Ian Rogers
Date: Fri Nov 15 2019 - 20:19:20 EST
The caller (merge_sched_in) has already verified that the event's CPU
and cgroup match, so call pmu_filter_match() directly instead of the
full event_filter_match().
Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
kernel/events/core.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 37abfca18bd3..6427b16c95d0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2212,8 +2212,11 @@ static inline int pmu_filter_match(struct perf_event *event)
static inline int
event_filter_match(struct perf_event *event)
{
- return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
- perf_cgroup_match(event) && pmu_filter_match(event);
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+ if (!perf_cgroup_match(event))
+ return 0;
+ return pmu_filter_match(event);
}
static void
@@ -3562,7 +3565,11 @@ static int merge_sched_in(struct perf_event_context *ctx,
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
- if (!event_filter_match(event))
+ /*
+ * Avoid full event_filter_match as the caller verified the CPU and
+ * cgroup before calling.
+ */
+ if (!pmu_filter_match(event))
return 0;
if (group_can_go_on(event, cpuctx, 1)) {
--
2.24.0.432.g9d3f5f5b63-goog