Re: [PATCH 3/5] cpufreq/amd-pstate: Refactor amd_pstate_epp_reenable() and amd_pstate_epp_offline()
From: Gautham R. Shenoy
Date: Fri Dec 06 2024 - 00:01:23 EST
On Wed, Dec 04, 2024 at 02:48:40PM +0000, Dhananjay Ugwekar wrote:
> Replace similar code chunks with amd_pstate_update_perf() and
> amd_pstate_set_epp() function calls.
>
> Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@xxxxxxx>
> ---
> drivers/cpufreq/amd-pstate.c | 36 +++++++-----------------------------
> 1 file changed, 7 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index a1b2393cef22..a38be7727c9d 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -1630,25 +1630,17 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
>
> static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
> {
> - struct cppc_perf_ctrls perf_ctrls;
> - u64 value, max_perf;
> + u64 max_perf;
> int ret;
>
> ret = amd_pstate_cppc_enable(true);
> if (ret)
> pr_err("failed to enable amd pstate during resume, return %d\n", ret);
>
> - value = READ_ONCE(cpudata->cppc_req_cached);
> max_perf = READ_ONCE(cpudata->highest_perf);
>
> - if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
> - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> - } else {
> - perf_ctrls.max_perf = max_perf;
> - cppc_set_perf(cpudata->cpu, &perf_ctrls);
> - perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
> - cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
> - }
> + amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
> + amd_pstate_set_epp(cpudata, cpudata->epp_cached);
This will cause two MSR writes on MSR-based systems.
But then, we don't expect amd_pstate_epp_reenable() and
amd_pstate_epp_offline() to be called often, so it should be fine.
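For completeness, a rough sketch of where the two writes come from on the
MSR path (helper bodies paraphrased, not the exact driver code;
AMD_CPPC_EPP_PERF() is assumed here to be the EPP field macro):

/* Sketch: roughly what the two helpers do when X86_FEATURE_CPPC is set */
static void sketch_update_perf(struct amd_cpudata *cpudata)
{
	/* 1st write: min/max/desired perf fields from cppc_req_cached */
	wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
		      READ_ONCE(cpudata->cppc_req_cached));
}

static void sketch_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
	u64 value = READ_ONCE(cpudata->cppc_req_cached);

	value &= ~AMD_CPPC_EPP_PERF(~0L);
	value |= AMD_CPPC_EPP_PERF(epp);
	/* 2nd write: same MSR again, only the EPP field changed */
	wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
}

The old open-coded version folded both updates into a single
wrmsrl_on_cpu(), which is what the refactoring trades away here.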
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@xxxxxxx>
--
Thanks and Regards
gautham.
> }
>
> static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
> @@ -1668,7 +1660,6 @@ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
> static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
> {
> struct amd_cpudata *cpudata = policy->driver_data;
> - struct cppc_perf_ctrls perf_ctrls;
> int min_perf;
> u64 value;
>
> @@ -1676,23 +1667,10 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
> value = READ_ONCE(cpudata->cppc_req_cached);
>
> mutex_lock(&amd_pstate_limits_lock);
> - if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
> - cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
> -
> - /* Set max perf same as min perf */
> - value &= ~AMD_CPPC_MAX_PERF(~0L);
> - value |= AMD_CPPC_MAX_PERF(min_perf);
> - value &= ~AMD_CPPC_MIN_PERF(~0L);
> - value |= AMD_CPPC_MIN_PERF(min_perf);
> - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> - } else {
> - perf_ctrls.desired_perf = 0;
> - perf_ctrls.min_perf = min_perf;
> - perf_ctrls.max_perf = min_perf;
> - cppc_set_perf(cpudata->cpu, &perf_ctrls);
> - perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
> - cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
> - }
> +
> + amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
> + amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
> +
> mutex_unlock(&amd_pstate_limits_lock);
> }
>
> --
> 2.34.1
>