[PATCH 06/15] cpufreq: Use trace_invoke_##name() at guarded tracepoint call sites
From: Vineeth Pillai (Google)
Date: Thu Mar 12 2026 - 11:12:19 EST
Replace trace_foo() with the new trace_invoke_foo() at sites already
guarded by trace_foo_enabled(), avoiding a redundant
static_branch_unlikely() re-evaluation inside the tracepoint.
trace_invoke_foo() invokes the tracepoint callbacks directly, without
consulting the static branch a second time.
Suggested-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Vineeth Pillai (Google) <vineeth@xxxxxxxxxxxxxxx>
Assisted-by: Claude:claude-sonnet-4-6
---
drivers/cpufreq/amd-pstate.c | 10 +++++-----
drivers/cpufreq/cpufreq.c | 2 +-
drivers/cpufreq/intel_pstate.c | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 5aa9fcd80cf51..3fa40a32ef6b5 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -247,7 +247,7 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
if (trace_amd_pstate_epp_perf_enabled()) {
union perf_cached perf = READ_ONCE(cpudata->perf);
- trace_amd_pstate_epp_perf(cpudata->cpu,
+ trace_invoke_amd_pstate_epp_perf(cpudata->cpu,
perf.highest_perf,
epp,
min_perf,
@@ -298,7 +298,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
if (trace_amd_pstate_epp_perf_enabled()) {
union perf_cached perf = cpudata->perf;
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ trace_invoke_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
epp,
FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
cpudata->cppc_req_cached),
@@ -343,7 +343,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
if (trace_amd_pstate_epp_perf_enabled()) {
union perf_cached perf = cpudata->perf;
- trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ trace_invoke_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
epp,
FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
cpudata->cppc_req_cached),
@@ -507,7 +507,7 @@ static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
if (trace_amd_pstate_epp_perf_enabled()) {
union perf_cached perf = READ_ONCE(cpudata->perf);
- trace_amd_pstate_epp_perf(cpudata->cpu,
+ trace_invoke_amd_pstate_epp_perf(cpudata->cpu,
perf.highest_perf,
epp,
min_perf,
@@ -588,7 +588,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
}
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
- trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
+ trace_invoke_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
cpudata->cpu, fast_switch);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 277884d91913c..cf57aeb503790 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2222,7 +2222,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
if (trace_cpu_frequency_enabled()) {
for_each_cpu(cpu, policy->cpus)
- trace_cpu_frequency(freq, cpu);
+ trace_invoke_cpu_frequency(freq, cpu);
}
return freq;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 11c58af419006..a0da9b31c4ffe 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3132,7 +3132,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
return;
sample = &cpu->sample;
- trace_pstate_sample(trace_type,
+ trace_invoke_pstate_sample(trace_type,
0,
old_pstate,
cpu->pstate.current_pstate,
--
2.53.0