[PATCH 8/8] x86/intel_rdt: rmdir,umount and hotcpu updates for MBA
From: Vikas Shivappa
Date: Tue Jan 10 2017 - 14:36:15 EST
During rmdir, only reset the ctrl values for the rdtgroup's closid. This
is done so that the next time the closid is reused, it does not reflect
the old values.
Remove the closid update during cpuonline in cqm, as it is already done
in the CAT code. Since both cqm and CAT want the rmid and closid to be
zero when a cpu comes online, also remove the PQR MSR write to zero
during cpuonline: the MSRs are already zero after cpu reset, and they
are updated anyway during the first sched-in.
Signed-off-by: Vikas Shivappa <vikas.shivappa@xxxxxxxxxxxxxxx>
---
arch/x86/events/intel/cqm.c | 1 -
arch/x86/kernel/cpu/intel_rdt.c | 1 -
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 18 +++++++++++++-----
3 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8c00dc0..baf7ade 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1573,7 +1573,6 @@ static int intel_cqm_cpu_starting(unsigned int cpu)
struct cpuinfo_x86 *c = &cpu_data(cpu);
state->rmid = 0;
- state->closid = 0;
state->rmid_usecnt = 0;
WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 9b0a00e..0c25227 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -403,7 +403,6 @@ static void clear_closid(int cpu)
per_cpu(cpu_closid, cpu) = 0;
state->closid = 0;
- wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
}
static int intel_rdt_online_cpu(unsigned int cpu)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 9d9b7f4..97fc129 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -856,7 +856,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
return dentry;
}
-static int reset_all_cbms(struct rdt_resource *r)
+static int reset_all_ctrls(struct rdt_resource *r, u32 sclosid, u32 eclosid)
{
struct msr_param msr_param;
cpumask_var_t cpu_mask;
@@ -867,8 +867,8 @@ static int reset_all_cbms(struct rdt_resource *r)
return -ENOMEM;
msr_param.res = r;
- msr_param.low = 0;
- msr_param.high = r->num_closid;
+ msr_param.low = sclosid;
+ msr_param.high = eclosid;
/*
* Disable resource control for this resource by setting all
@@ -878,7 +878,7 @@ static int reset_all_cbms(struct rdt_resource *r)
list_for_each_entry(d, &r->domains, list) {
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
- for (i = 0; i < r->num_closid; i++)
+ for (i = sclosid; i < eclosid; i++)
d->ctrl_val[i] = r->no_ctrl;
}
cpu = get_cpu();
@@ -972,7 +972,7 @@ static void rdt_kill_sb(struct super_block *sb)
/*Put everything back to default values. */
for_each_enabled_rdt_resource(r)
- reset_all_cbms(r);
+ reset_all_ctrls(r, 0, r->num_closid);
cdp_disable();
rmdir_all_sub();
static_branch_disable(&rdt_enable_key);
@@ -1067,6 +1067,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
{
int ret, cpu, closid = rdtgroup_default.closid;
struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
cpumask_var_t tmpmask;
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
@@ -1095,6 +1096,13 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
rdt_update_closid(tmpmask, NULL);
+ /*
+ * Put domain control values back to default for the
+ * rdtgrp thats being removed.
+ */
+ for_each_enabled_rdt_resource(r)
+ reset_all_ctrls(r, rdtgrp->closid, rdtgrp->closid + 1);
+
rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
list_del(&rdtgrp->rdtgroup_list);
--
1.9.1