[PATCH 4/5] perf/x86: Change x86_pmu::limit_period signature

From: Peter Zijlstra
Date: Wed May 11 2022 - 10:25:25 EST


In preparation for making it a static_call, change the signature: have the
hook clamp the period in place through an s64 pointer instead of returning
a new u64 value. This matches the s64 'left' the callers already operate
on and gives the hook a void return.
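
A void return is what makes the conversion convenient: static_call_cond()
only works for functions returning void, so a NULL static_call target can
simply leave *left untouched instead of every backend needing a default
"return the input" implementation. A rough sketch of the expected
follow-up conversion (illustrative only, not part of this patch):

	DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

	/* filled in from x86_pmu_static_call_update() or similar */
	static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);

	/* call site: a NULL target makes this a no-op */
	static_call_cond(x86_pmu_limit_period)(event, &left);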

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 arch/x86/events/amd/core.c   |  8 +++-----
 arch/x86/events/core.c       | 18 ++++++++----------
 arch/x86/events/intel/core.c | 19 ++++++++-----------
 arch/x86/events/perf_event.h |  2 +-
 4 files changed, 20 insertions(+), 27 deletions(-)

--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1258,16 +1258,14 @@ static void amd_pmu_sched_task(struct pe
 	amd_pmu_brs_sched_task(ctx, sched_in);
 }
 
-static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
 {
 	/*
 	 * Decrease period by the depth of the BRS feature to get the last N
 	 * taken branches and approximate the desired period
 	 */
-	if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
-		left -= x86_pmu.lbr_nr;
-
-	return left;
+	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
+		*left -= x86_pmu.lbr_nr;
 }
 
 static __initconst const struct x86_pmu amd_pmu = {
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -621,8 +621,9 @@ int x86_pmu_hw_config(struct perf_event
 		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
+		s64 left = event->attr.sample_period;
+		x86_pmu.limit_period(event, &left);
+		if (left > event->attr.sample_period)
 			return -EINVAL;
 	}
 
@@ -1386,19 +1387,14 @@ int x86_perf_event_set_period(struct per
 		hwc->last_period = period;
 		ret = 1;
 	}
-	/*
-	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
-	 */
-	if (unlikely(left < 2))
-		left = 2;
 
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
 	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
+		x86_pmu.limit_period(event, &left);
 
-	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+	this_cpu_write(pmc_prev_left[idx], left);
 
 	/*
 	 * The hw event starts counting from this event offset,
@@ -2672,7 +2668,9 @@ static int x86_pmu_check_period(struct p
 		return -EINVAL;
 
 	if (value && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, value) > value)
+		s64 left = value;
+		x86_pmu.limit_period(event, &left);
+		if (left > value)
 			return -EINVAL;
 	}
 
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4244,28 +4244,25 @@ static u8 adl_get_hybrid_cpu_type(void)
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 	    X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fULL;
+		if (*left < 128)
+			*left = 128;
+		*left &= ~0x3fULL;
 	}
-	return left;
 }
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-	return max(left, 32ULL);
+	*left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
 	if (event->attr.precise_ip == 3)
-		return max(left, 128ULL);
-
-	return left;
+		*left = max(*left, 128LL);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7" );
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -773,7 +773,7 @@ struct x86_pmu {
 	struct event_constraint *event_constraints;
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
-	u64		(*limit_period)(struct perf_event *event, u64 l);
+	void		(*limit_period)(struct perf_event *event, s64 *l);
 
 	/* PMI handler bits */
 	unsigned int	late_ack		:1,
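
Note for context: bdw_limit_period() above encodes the erratum workaround
its comment describes, clamping the period to at least 128 and rounding it
down to a multiple of 64. A stand-alone sketch of just that arithmetic
(hypothetical demo code, not part of the patch, mirroring the patched
helper):

	#include <stdio.h>

	/* same quantisation as the patched bdw_limit_period() */
	static void bdw_quantize(long long *left)
	{
		if (*left < 128)
			*left = 128;
		*left &= ~0x3fLL;	/* round down to a multiple of 64 */
	}

	int main(void)
	{
		long long periods[] = { 7, 128, 100003 };
		int i;

		for (i = 0; i < 3; i++) {
			long long left = periods[i];

			bdw_quantize(&left);
			/* prints: 7 -> 128, 128 -> 128, 100003 -> 99968 */
			printf("%lld -> %lld\n", periods[i], left);
		}
		return 0;
	}

The rounded-off remainder is not lost: it stays in the event's period_left
accounting and is made up on the next reload, which is how the comment
above can promise that the average period still matches the requested one.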