From: Perry Yuan <Perry.Yuan@xxxxxxx>
With the new sysfs entry `cpb_boost` created, users can change the CPU boost
state dynamically in both `active` and `passive` modes. The highest perf and
the maximum frequency are updated accordingly whenever the boost state
changes.
0: check current boost state
   cat /sys/devices/system/cpu/amd_pstate/cpb_boost
1: disable CPU boost
   sudo bash -c "echo 0 > /sys/devices/system/cpu/amd_pstate/cpb_boost"
2: enable CPU boost
   sudo bash -c "echo 1 > /sys/devices/system/cpu/amd_pstate/cpb_boost"
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217931
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217618
Signed-off-by: Perry Yuan <Perry.Yuan@xxxxxxx>
---
drivers/cpufreq/amd-pstate.c | 97 ++++++++++++++++++++++++++++++++++++
1 file changed, 97 insertions(+)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 0dc9124140d4..b37bea7440b9 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1036,6 +1036,101 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
return ret < 0 ? ret : count;
}
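+
+/*
+ * Set the boost state for one CPU: when boost is enabled, use the highest
+ * perf as the max perf limit; otherwise clamp it to the nominal perf, and
+ * update the policy frequency limits to match.
+ */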
+static int amd_cpu_boost_update(struct amd_cpudata *cpudata, u32 on)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+	struct cppc_perf_ctrls perf_ctrls;
+	u32 highest_perf, nominal_perf;
+	int ret = 0;
+
+	if (!policy)
+		return -ENODATA;
+
+	highest_perf = READ_ONCE(cpudata->highest_perf);
+	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+		/* Bits [7:0] of CPPC_REQ hold the max perf limit. */
+		value &= ~GENMASK_ULL(7, 0);
+		value |= on ? highest_perf : nominal_perf;
+		WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
+		ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+		if (ret) {
+			pr_debug("failed to set energy perf value (%d)\n", ret);
+			cpufreq_cpu_release(policy);
+			return ret;
+		}
+	}
+
+	if (on)
+		policy->cpuinfo.max_freq = cpudata->max_freq;
+	else
+		policy->cpuinfo.max_freq = cpudata->nominal_freq;
+
+	policy->max = policy->cpuinfo.max_freq;
+
+	if (cppc_state == AMD_PSTATE_PASSIVE) {
+		ret = freq_qos_update_request(&cpudata->req[1],
+					      policy->cpuinfo.max_freq);
+	}
+
+	cpufreq_cpu_release(policy);
+
+	return ret;
+}
+
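+/* Show the current global core performance boost state. */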
+static ssize_t cpb_boost_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", global.cpb_boost);
+}
+
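+/*
+ * Enable or disable core performance boost on all CPUs and refresh each
+ * policy's frequency limits to reflect the new state.
+ */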
+static ssize_t cpb_boost_store(struct device *dev, struct device_attribute *b,
+			       const char *buf, size_t count)
+{
+	bool new_state;
+	ssize_t ret;
+	int cpu;
+
+	mutex_lock(&amd_pstate_driver_lock);
+	if (!global.cpb_supported) {
+		pr_err("Boost mode is not supported by this processor or SBIOS\n");
+		ret = -EINVAL;
+		goto err_exit;
+	}
+
+	ret = kstrtobool(buf, &new_state);
+	if (ret)
+		goto err_exit;
+
+	global.cpb_boost = !!new_state;
+
+	for_each_possible_cpu(cpu) {
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+		struct amd_cpudata *cpudata;
+
+		if (!policy)
+			continue;
+
+		cpudata = policy->driver_data;
+		if (!cpudata) {
+			pr_err("cpudata is NULL\n");
+			ret = -ENODATA;
+			cpufreq_cpu_put(policy);
+			goto err_exit;
+		}
+
+		amd_cpu_boost_update(cpudata, global.cpb_boost);
+		refresh_frequency_limits(policy);
+		cpufreq_cpu_put(policy);
+	}
+
+err_exit:
+	mutex_unlock(&amd_pstate_driver_lock);
+	return ret < 0 ? ret : count;
+}
+
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
@@ -1043,6 +1138,7 @@ cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RW(cpb_boost);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -1062,6 +1158,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
+ &dev_attr_cpb_boost.attr,
NULL
};