[PATCH v7 17/24] x86/resctrl: Add the interface to unassign a MBM counter
From: Babu Moger
Date: Wed Sep 04 2024 - 18:26:52 EST
The mbm_cntr_assign mode provides a limited number of hardware counters
that can be assigned to an RMID-event pair to monitor bandwidth while
assigned. If all counters are in use, the kernel will show an error
message: "Out of MBM assignable counters" when a new assignment is
requested. To make space for a new assignment, users must unassign an
already assigned counter.
Introduce an interface that allows for the unassignment of counter IDs
from both the group and the domain. Additionally, ensure that the global
counter is released if it is no longer assigned to any domains.
Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
---
v7: Merged rdtgroup_unassign_cntr and rdtgroup_free_cntr functions.
Renamed rdtgroup_mbm_cntr_test() to rdtgroup_mbm_cntr_is_assigned().
Reworded the commit log a little bit.
v6: Removed mbm_cntr_free from this patch.
Added a counter-assignment test across all the domains; free the counter
if it is not assigned to any domain.
v5: Few name changes to match cntr_id.
Changed the function names to rdtgroup_unassign_cntr
More comments on commit log.
v4: Added domain specific unassign feature.
Few name changes.
v3: Removed the static from the prototype of rdtgroup_unassign_abmc.
The function is not called directly from user anymore. These
changes are related to global assignment interface.
v2: No changes.
---
arch/x86/kernel/cpu/resctrl/internal.h | 2 ++
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 49 ++++++++++++++++++++++++++
2 files changed, 51 insertions(+)
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 6a90fc20be5b..9a65a13ccbe9 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -707,6 +707,8 @@ int resctrl_arch_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 cntr_id, bool assign);
int rdtgroup_assign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp,
struct rdt_mon_domain *d, enum resctrl_event_id evtid);
+int rdtgroup_unassign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp,
+ struct rdt_mon_domain *d, enum resctrl_event_id evtid);
void rdt_staged_configs_clear(void);
bool closid_allocated(unsigned int closid);
int resctrl_find_cleanest_closid(void);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 1d45120ff2b5..21b9ca4ce493 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1944,6 +1944,55 @@ int rdtgroup_assign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp,
return 0;
}
+/*
+ * rdtgroup_mbm_cntr_is_assigned - Check if a counter is in use anywhere.
+ *
+ * Returns true if @cntr_id is assigned in any monitoring domain of @r,
+ * false otherwise. Predicate, so return bool rather than int per kernel
+ * coding style.
+ */
+static bool rdtgroup_mbm_cntr_is_assigned(struct rdt_resource *r, u32 cntr_id)
+{
+	struct rdt_mon_domain *d;
+
+	list_for_each_entry(d, &r->mon_domains, hdr.list)
+		if (test_bit(cntr_id, d->mbm_cntr_map))
+			return true;
+
+	return false;
+}
+
+/*
+ * rdtgroup_unassign_cntr - Unassign a hardware counter from a group.
+ *
+ * Unassign the counter backing @evtid for @rdtgrp. If @d is NULL the
+ * counter is unassigned in every monitoring domain of @r, otherwise only
+ * in @d. The global counter ID is freed once it is no longer assigned in
+ * any domain, making it available for new assignments.
+ */
+int rdtgroup_unassign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp,
+			   struct rdt_mon_domain *d,
+			   enum resctrl_event_id evtid)
+{
+	int index = MBM_EVENT_ARRAY_INDEX(evtid);
+	int cntr_id = rdtgrp->mon.cntr_id[index];
+	struct rdt_mon_domain *dom;
+
+	/* Nothing to do if no counter was assigned for this event. */
+	if (cntr_id == MON_CNTR_UNSET)
+		return 0;
+
+	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
+		/* A non-NULL @d restricts the unassignment to that domain. */
+		if (d && dom != d)
+			continue;
+
+		resctrl_arch_assign_cntr(r, dom, evtid, rdtgrp->mon.rmid,
+					 rdtgrp->closid, cntr_id, false);
+		clear_bit(cntr_id, dom->mbm_cntr_map);
+	}
+
+	/* Free the global counter once no domain uses it any more. */
+	if (!rdtgroup_mbm_cntr_is_assigned(r, cntr_id)) {
+		mbm_cntr_free(r, cntr_id);
+		rdtgrp->mon.cntr_id[index] = MON_CNTR_UNSET;
+	}
+
+	return 0;
+}
+
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
--
2.34.1