[PATCH v12 22/26] x86/resctrl: Auto assign/unassign counters when mbm_cntr_assign is enabled
From: Babu Moger
Date: Thu Apr 03 2025 - 20:37:49 EST
Automatically assign or unassign counters when a resctrl group is created
or deleted. By default, each group requires two counters: one for the MBM
total event and one for the MBM local event.
The mbm_cntr_assign mode provides "num_mbm_cntrs" counters that can be
assigned to an RMID, event pair; bandwidth for that pair is monitored for
as long as the counter remains assigned. If the counters are exhausted,
the kernel logs the error message "Unable to allocate counter in domain"
in /sys/fs/resctrl/info/last_cmd_status when a new group is created.
However, the creation of a group should not fail due to assignment
failures; users can modify the assignments at a later time.
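For quick reference, the shape of the new mkdir-time helper is sketched
below. This is a condensed, illustrative version of the rdtgroup.c hunk
further down, not the exact patched code: the total_cfg/local_cfg locals
stand in for the mbm_get_assign_config() lookups, and the
mbm_assign_on_mkdir check is elided. The helper deliberately returns void
so an exhausted counter pool never fails group creation:

	static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
	{
		struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

		/* Nothing to do unless monitoring and assign mode are available. */
		if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
			return;

		/*
		 * One counter per enabled MBM event. A failed assignment only
		 * logs to /sys/fs/resctrl/info/last_cmd_status; mkdir still
		 * succeeds.
		 */
		if (resctrl_arch_is_mbm_total_enabled())
			resctrl_assign_cntr_event(r, NULL, rdtgrp,
						  QOS_L3_MBM_TOTAL_EVENT_ID, total_cfg->val);
		if (resctrl_arch_is_mbm_local_enabled())
			resctrl_assign_cntr_event(r, NULL, rdtgrp,
						  QOS_L3_MBM_LOCAL_EVENT_ID, local_cfg->val);
	}

The matching rdtgroup_unassign_cntrs() helper releases both counters on
group removal and on unmount, as shown in the diff.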
Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
---
v12: Removed mbm_cntr_reset() as it is not required when removing the group.
Updated the commit text.
Added the r->mon_capable check in rdtgroup_assign_cntrs() and rdtgroup_unassign_cntrs().
v11: Moved mbm_cntr_reset() to monitor.c.
Added code to reset non-architectural state in mbm_cntr_reset().
Added missing rdtgroup_unassign_cntrs() calls on failure path.
v10: Assigned the counter before exposing the event files.
Moved the call rdtgroup_assign_cntrs() inside mkdir_rdt_prepare_rmid_alloc().
This is called for both CTRL_MON and MON group creation.
Called mbm_cntr_reset() when unmounting to clear all the assignments.
Taken care of a few other feedback comments.
v9: Changed rdtgroup_assign_cntrs() and rdtgroup_unassign_cntrs() to return void.
Updated a couple of rdtgroup_unassign_cntrs() calls properly.
Updated function comments.
v8: Renamed rdtgroup_assign_grp to rdtgroup_assign_cntrs.
Renamed rdtgroup_unassign_grp to rdtgroup_unassign_cntrs.
Fixed the problem with unassigning the child MON groups of the CTRL_MON group.
v7: Reworded the commit message.
Removed the reference of ABMC with mbm_cntr_assign.
Renamed the function rdtgroup_assign_cntrs to rdtgroup_assign_grp.
v6: Removed the redundant comments on all the calls of
rdtgroup_assign_cntrs. Updated the commit message.
Dropped printing an error message on every call of rdtgroup_assign_cntrs.
v5: Removed the code to enable/disable ABMC during the mount.
That will be another patch.
Added arch callers to get the arch specific data.
Renamed functions to match the other abmc function.
Added code comments for assignment failures.
v4: A few name changes based on the upstream discussion.
Commit message update.
v3: This is a new patch. It addresses the upstream comment to enable the
ABMC feature by default if the feature is available.
---
arch/x86/kernel/cpu/resctrl/monitor.c | 1 +
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 95 +++++++++++++++++++++++++-
2 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index ee31dfe2c224..4e22563dda60 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -1316,6 +1316,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
r->mon.mbm_cntr_assignable = true;
cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;
+ r->mon.mbm_assign_on_mkdir = true;
}
r->mon_capable = true;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 592a9dc5b404..3e440ace60e0 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -72,6 +72,18 @@ struct mbm_assign_config mbm_assign_configs[NUM_MBM_ASSIGN_CONFIGS] = {
{"mbm_local_bytes", QOS_L3_MBM_LOCAL_EVENT_ID, 0x15},
};
+static struct mbm_assign_config *mbm_get_assign_config(enum resctrl_event_id evtid)
+{
+ int i;
+
+ for (i = 0; i < NUM_MBM_ASSIGN_CONFIGS; i++) {
+ if (mbm_assign_configs[i].evtid == evtid)
+ return &mbm_assign_configs[i];
+ }
+
+ return NULL;
+}
+
/*
* Used to store the max resource name width to display the schemata names in
* a tabular format.
@@ -3043,6 +3055,67 @@ static void schemata_list_destroy(void)
}
}
+/*
+ * Called when a new group is created. If "mbm_cntr_assign" mode is enabled,
+ * counters are automatically assigned. Each group can accommodate two counters:
+ * one for the total event and one for the local event. Assignments may fail
+ * due to the limited number of counters. However, it is not necessary to fail
+ * the group creation and thus no failure is returned. Users have the option
+ * to modify the counter assignments after the group has been created.
+ */
+static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct mbm_assign_config *assign_config;
+
+ if (!r->mon_capable)
+ return;
+
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r) || !r->mon.mbm_assign_on_mkdir)
+ return;
+
+ if (resctrl_arch_is_mbm_total_enabled()) {
+ assign_config = mbm_get_assign_config(QOS_L3_MBM_TOTAL_EVENT_ID);
+ if (assign_config)
+ resctrl_assign_cntr_event(r, NULL, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID,
+ assign_config->val);
+ }
+
+ if (resctrl_arch_is_mbm_local_enabled()) {
+ assign_config = mbm_get_assign_config(QOS_L3_MBM_LOCAL_EVENT_ID);
+ if (assign_config)
+ resctrl_assign_cntr_event(r, NULL, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID,
+ assign_config->val);
+ }
+}
+
+/*
+ * Called when a group is deleted. Counters are unassigned if they are in
+ * the assigned state.
+ */
+static void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct mbm_assign_config *assign_config;
+
+ if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
+ return;
+
+ if (resctrl_arch_is_mbm_total_enabled()) {
+ assign_config = mbm_get_assign_config(QOS_L3_MBM_TOTAL_EVENT_ID);
+ if (assign_config)
+ resctrl_unassign_cntr_event(r, NULL, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID,
+ assign_config->val);
+ }
+
+ if (resctrl_arch_is_mbm_local_enabled()) {
+ assign_config = mbm_get_assign_config(QOS_L3_MBM_LOCAL_EVENT_ID);
+ if (assign_config)
+ resctrl_unassign_cntr_event(r, NULL, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID,
+ assign_config->val);
+ }
+}
+
static int rdt_get_tree(struct fs_context *fc)
{
struct rdt_fs_context *ctx = rdt_fc2context(fc);
@@ -3097,6 +3170,8 @@ static int rdt_get_tree(struct fs_context *fc)
if (ret < 0)
goto out_info;
+ rdtgroup_assign_cntrs(&rdtgroup_default);
+
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
if (ret < 0)
@@ -3135,8 +3210,10 @@ static int rdt_get_tree(struct fs_context *fc)
if (resctrl_arch_mon_capable())
kernfs_remove(kn_mondata);
out_mongrp:
- if (resctrl_arch_mon_capable())
+ if (resctrl_arch_mon_capable()) {
+ rdtgroup_unassign_cntrs(&rdtgroup_default);
kernfs_remove(kn_mongrp);
+ }
out_info:
kernfs_remove(kn_info);
out_schemata_free:
@@ -3312,6 +3389,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
head = &rdtgrp->mon.crdtgrp_list;
list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
+ rdtgroup_unassign_cntrs(sentry);
free_rmid(sentry->closid, sentry->mon.rmid);
list_del(&sentry->mon.crdtgrp_list);
@@ -3352,6 +3430,8 @@ static void rmdir_all_sub(void)
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+ rdtgroup_unassign_cntrs(rdtgrp);
+
free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
kernfs_remove(rdtgrp->kn);
@@ -3384,6 +3464,7 @@ static void rdt_kill_sb(struct super_block *sb)
resctrl_arch_reset_all_ctrls(r);
rmdir_all_sub();
+ rdtgroup_unassign_cntrs(&rdtgroup_default);
rdt_pseudo_lock_release();
rdtgroup_default.mode = RDT_MODE_SHAREABLE;
schemata_list_destroy();
@@ -3847,9 +3928,12 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
}
rdtgrp->mon.rmid = ret;
+ rdtgroup_assign_cntrs(rdtgrp);
+
ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
if (ret) {
rdt_last_cmd_puts("kernfs subdir error\n");
+ rdtgroup_unassign_cntrs(rdtgrp);
free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
return ret;
}
@@ -3859,8 +3943,10 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
{
- if (resctrl_arch_mon_capable())
+ if (resctrl_arch_mon_capable()) {
+ rdtgroup_unassign_cntrs(rgrp);
free_rmid(rgrp->closid, rgrp->mon.rmid);
+ }
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
@@ -4128,6 +4214,9 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
update_closid_rmid(tmpmask, NULL);
rdtgrp->flags = RDT_DELETED;
+
+ rdtgroup_unassign_cntrs(rdtgrp);
+
free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
/*
@@ -4175,6 +4264,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
update_closid_rmid(tmpmask, NULL);
+ rdtgroup_unassign_cntrs(rdtgrp);
+
free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
closid_free(rdtgrp->closid);
--
2.34.1