[PATCH 01/15] cpufreq/amd-pstate: Add trace event for EPP perf updates

From: Mario Limonciello
Date: Thu Dec 05 2024 - 17:29:18 EST


In "active" mode the most important thing for debugging whether
an issue is hardware or software based is to look at what was the
last thing written to the CPPC request MSR or shared memory region.

The 'amd_pstate_epp_perf' trace event shows the values being written
for all CPUs.
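
With the event enabled, each update is logged in the TP_printk()
format defined below; for example (values here are illustrative):

  amd_pstate_epp_perf: cpu3: [85<->166]/166, epp=128, boost=1

i.e. the requested min/max perf against the highest perf, plus the
EPP value and the boost state that were written.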

Signed-off-by: Mario Limonciello <mario.limonciello@xxxxxxx>
---
 drivers/cpufreq/amd-pstate-trace.h | 45 ++++++++++++++++++++++++++++++
 drivers/cpufreq/amd-pstate.c       | 28 +++++++++++++++++++
2 files changed, 73 insertions(+)
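
For testing, a minimal userspace sketch such as the one below can be
used to stream the new event. It assumes the default tracefs mount at
/sys/kernel/tracing and that the event registers under the existing
"amd_cpu" trace system (as amd_pstate_perf does); adjust the paths if
your setup differs.

	#include <stdio.h>

	int main(void)
	{
		FILE *en, *tp;
		char line[512];

		/* enable the amd_pstate_epp_perf trace event */
		en = fopen("/sys/kernel/tracing/events/amd_cpu/amd_pstate_epp_perf/enable", "w");
		if (!en)
			return 1;
		fputs("1", en);
		fclose(en);

		/* stream formatted events; one line per EPP perf update */
		tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
		if (!tp)
			return 1;
		while (fgets(line, sizeof(line), tp))
			fputs(line, stdout);
		fclose(tp);
		return 0;
	}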

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb13..e2221a4b6901c 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -88,6 +88,51 @@ TRACE_EVENT(amd_pstate_perf,
 		  )
 );
 
+TRACE_EVENT(amd_pstate_epp_perf,
+
+	TP_PROTO(unsigned int cpu_id,
+		 unsigned int highest_perf,
+		 unsigned int epp,
+		 unsigned int min_perf,
+		 unsigned int max_perf,
+		 bool boost
+		 ),
+
+	TP_ARGS(cpu_id,
+		highest_perf,
+		epp,
+		min_perf,
+		max_perf,
+		boost),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu_id)
+		__field(unsigned int, highest_perf)
+		__field(unsigned int, epp)
+		__field(unsigned int, min_perf)
+		__field(unsigned int, max_perf)
+		__field(bool, boost)
+		),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->highest_perf = highest_perf;
+		__entry->epp = epp;
+		__entry->min_perf = min_perf;
+		__entry->max_perf = max_perf;
+		__entry->boost = boost;
+		),
+
+	TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+		  (unsigned int)__entry->cpu_id,
+		  (unsigned int)__entry->min_perf,
+		  (unsigned int)__entry->max_perf,
+		  (unsigned int)__entry->highest_perf,
+		  (unsigned int)__entry->epp,
+		  (bool)__entry->boost
+		)
+);
+
 #endif /* _AMD_PSTATE_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 66fb7aee95d24..4d1da49d345ec 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -324,6 +324,14 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
 		return -EBUSY;
 	}
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  epp,
+					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+					  AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
+					  cpudata->boost_state);
+	}
+
 	ret = amd_pstate_set_epp(cpudata, epp);
 
 	return ret;
@@ -1596,6 +1604,13 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)

 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+					  cpudata->min_limit_perf,
+					  cpudata->max_limit_perf,
+					  cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
 			       cpudata->max_limit_perf, false);
 
@@ -1639,6 +1654,13 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)

 	max_perf = READ_ONCE(cpudata->highest_perf);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  cpudata->epp_cached,
+					  AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+					  max_perf, cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
 	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
 }
@@ -1667,6 +1689,12 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)

 	mutex_lock(&amd_pstate_limits_lock);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
+					  min_perf, min_perf, cpudata->boost_state);
+	}
+
 	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
 	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
 
--
2.43.0