Legacy resctrl monitor files must provide the sum of event values across
all Sub-NUMA Cluster (SNC) domains that share an L3 cache instance.

Rename the existing resctrl_arch_rmid_read() function to
resctrl_arch_rmid_read_one() (with some small refactoring to drop
arguments that are not needed).

Create a new resctrl_arch_rmid_read() that iterates across domains when
summing is needed, passing a CPU number from each domain to
resctrl_arch_rmid_read_one(). The CPU number is used only to compute
the physical RMID for that domain's SNC node; the MSR access itself
executes on the current CPU, which shares the L3 cache with all of the
summed domains. Adjust the cpumask check in resctrl_arch_rmid_read_one()
to validate the passed CPU rather than the executing CPU, since summed
reads run on a CPU that belongs to only one of the SNC domains.
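
For example (illustrative values): on a system with two SNC nodes per
L3 cache, the legacy llc_occupancy file for an L3 instance whose two
SNC domains report 2 MB and 3 MB of occupancy must show 5 MB.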
Signed-off-by: Tony Luck <tony.luck@xxxxxxxxx>
---
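Illustrative use of the 'sum' semantics from a hypothetical caller (the
resource 'r', domain 'd', 'rmid' and cache instance 'ci' are assumed to
have been looked up by the filesystem code already):

	u64 val;
	int err;

	/* Sum llc_occupancy across all SNC domains sharing L3 instance 'ci'. */
	err = resctrl_arch_rmid_read(r, d, 0, rmid, QOS_L3_OCCUP_EVENT_ID,
				     &val, true, ci, NULL);
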
 arch/x86/kernel/cpu/resctrl/monitor.c | 43 ++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 5f89ed4823ee..c9dd6ec68fcd 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -200,10 +200,9 @@ static inline struct rmid_entry *__rmid_entry(u32 idx)
  * Caller is responsible to make sure execution running on a CPU in
  * the domain to be read.
  */
-static int logical_rmid_to_physical_rmid(int lrmid)
+static int logical_rmid_to_physical_rmid(int cpu, int lrmid)
 {
 	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
-	int cpu = smp_processor_id();
 
 	if (snc_nodes_per_l3_cache == 1)
 		return lrmid;
@@ -211,13 +210,13 @@ static int logical_rmid_to_physical_rmid(int lrmid)
 	return lrmid + (cpu_to_node(cpu) % snc_nodes_per_l3_cache) * r->num_rmid;
 }
 
-static int __rmid_read(u32 lrmid,
+static int __rmid_read(int cpu, u32 lrmid,
 		       enum resctrl_event_id eventid, u64 *val)
 {
 	u64 msr_val;
 	int prmid;
 
-	prmid = logical_rmid_to_physical_rmid(lrmid);
+	prmid = logical_rmid_to_physical_rmid(cpu, lrmid);
 	/*
 	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
 	 * with a valid event code for supported resource type and the bits
@@ -269,7 +268,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
 		memset(am, 0, sizeof(*am));
 
 		/* Record any initial, non-zero count value. */
-		__rmid_read(rmid, eventid, &am->prev_msr);
+		__rmid_read(smp_processor_id(), rmid, eventid, &am->prev_msr);
 	}
 }
 
@@ -298,9 +297,8 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 	return chunks >> shift;
 }
 
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
-			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
-			   u64 *val, bool sum, struct cacheinfo *ci, void *ignored)
+static int resctrl_arch_rmid_read_one(struct rdt_resource *r, struct rdt_mon_domain *d,
+				      int cpu, u32 rmid, enum resctrl_event_id eventid, u64 *val)
 {
 	struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
@@ -313,7 +311,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
-	if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
+	if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
 		return -EINVAL;
 
-	ret = __rmid_read(rmid, eventid, &msr_val);
+	ret = __rmid_read(cpu, rmid, eventid, &msr_val);
 	if (ret)
 		return ret;
 
@@ -327,7 +325,30 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
 		chunks = msr_val;
 	}
 
-	*val = chunks * hw_res->mon_scale;
+	*val += chunks * hw_res->mon_scale;
+
+	return 0;
+}
+
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
+			   u64 *val, bool sum, struct cacheinfo *ci, void *ignored)
+{
+	int cpu = smp_processor_id();
+	int ret;
+
+	*val = 0;
+	if (!sum)
+		return resctrl_arch_rmid_read_one(r, d, cpu, rmid, eventid, val);
+
+	list_for_each_entry(d, &r->mon_domains, hdr.list) {
+		if (d->ci->id != ci->id)
+			continue;
+		cpu = cpumask_any(&d->hdr.cpu_mask);
+		ret = resctrl_arch_rmid_read_one(r, d, cpu, rmid, eventid, val);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
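
Worked example of the RMID mapping in the sum path (illustrative
values): with snc_nodes_per_l3_cache == 2 and r->num_rmid == 256, a
summed read of logical RMID 5 calls resctrl_arch_rmid_read_one() once
per SNC domain. The domain whose cpumask_any() CPU sits on an
even-numbered node reads physical RMID 5 + (0 * 256) = 5; the odd-node
domain reads physical RMID 5 + (1 * 256) = 261. Each call accumulates
chunks * hw_res->mon_scale into *val.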