[PATCH 15/21] x86/intel_rdt/cqm: Add rmdir support
From: Vikas Shivappa
Date: Mon Jun 26 2017 - 14:55:26 EST
Resource groups (ctrl_mon and monitor groups) are represented by
directories in the resctrl filesystem. Add support for removing these
directories.

When a ctrl_mon directory is removed, all of its CPUs and tasks are
assigned back to the root rdtgroup. When a monitor group directory is
removed, its CPUs and tasks are returned to the parent control group.
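As an illustrative sketch only (the mount point is the usual
/sys/fs/resctrl, and the group names "p1" and "m1" are made up for the
example), a user space program can remove the groups with plain
rmdir(2):

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
	/* Tasks/CPUs of "m1" fall back to its parent control group "p1" */
	if (rmdir("/sys/fs/resctrl/p1/mon_groups/m1"))
		perror("rmdir mon group");

	/* Tasks/CPUs of "p1" fall back to the root rdtgroup */
	if (rmdir("/sys/fs/resctrl/p1"))
		perror("rmdir ctrl_mon group");

	return 0;
  }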
Signed-off-by: Vikas Shivappa <vikas.shivappa@xxxxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 110 +++++++++++++++++++++++++++----
1 file changed, 99 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 9377bcd..6131508 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1164,6 +1164,18 @@ static int reset_all_ctrls(struct rdt_resource *r)
return 0;
}
+static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_alloc_enabled &&
+ (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
+}
+
+static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_mon_features &&
+ (r->type == RDTMON_GROUP) && (t->rmid == r->rmid));
+}
+
/*
* Move tasks from one to the other group. If @from is NULL, then all tasks
* in the systems are moved unconditionally (used for teardown).
@@ -1179,8 +1191,11 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
- if (!from || t->closid == from->closid) {
+ if (!from || is_closid_match(t, from) ||
+ is_rmid_match(t, from)) {
t->closid = to->closid;
+ t->rmid = to->rmid;
+
#ifdef CONFIG_SMP
/*
* This is safe on x86 w/o barriers as the ordering
@@ -1402,6 +1417,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *pr,
kernfs_remove(kn);
return ret;
}
+
/*
* Common code for ctrl_mon and monitor group mkdir.
* The caller needs to unlock the global mutex upon success.
@@ -1588,20 +1604,57 @@ static int rdtgroup_mkdir(struct kernfs_node *pkn, const char *name,
return -EPERM;
}
-static int rdtgroup_rmdir(struct kernfs_node *kn)
+static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp)
{
- int ret, cpu, closid = rdtgroup_default.closid;
- struct rdtgroup *rdtgrp;
+ struct rdtgroup *prdtgrp = rdtgrp->parent;
cpumask_var_t tmpmask;
+ int cpu;
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
- rdtgrp = rdtgroup_kn_lock_live(kn);
- if (!rdtgrp) {
- ret = -EPERM;
- goto out;
- }
+ /* Give any tasks back to the parent group */
+ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
+
+ /* Update the per-cpu rmid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_rmid, cpu) = prdtgrp->rmid;
+ /*
+ * Update the MSR on moved CPUs and on CPUs which have a moved
+ * task running on them.
+ */
+ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+ update_closid_rmid(tmpmask, NULL);
+
+ rdtgrp->flags = RDT_DELETED;
+ free_rmid(rdtgrp->rmid);
+
+ /*
+ * Remove the rdtgrp from the parent ctrl_mon group's list
+ */
+ WARN_ON(list_empty(&prdtgrp->crdtgrp_list));
+ list_del(&rdtgrp->crdtgrp_list);
+
+ /*
+ * one extra hold on this, will drop when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock()
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ free_cpumask_var(tmpmask);
+
+ return 0;
+}
+
+static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp)
+{
+ int cpu, closid = rdtgroup_default.closid;
+ struct rdtgroup *entry, *tmp;
+ struct list_head *llist;
+ cpumask_var_t tmpmask;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
/* Give any tasks back to the default group */
rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
@@ -1622,6 +1675,18 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
+ free_rmid(rdtgrp->rmid);
+
+ /*
+ * Free all the child monitor group rmids.
+ */
+ llist = &rdtgrp->crdtgrp_list;
+ list_for_each_entry_safe(entry, tmp, llist, crdtgrp_list) {
+ free_rmid(entry->rmid);
+ list_del(&entry->crdtgrp_list);
+ kfree(entry);
+ }
+
list_del(&rdtgrp->rdtgroup_list);
/*
@@ -1630,10 +1695,33 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
*/
kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
- ret = 0;
+ free_cpumask_var(tmpmask);
+
+ return 0;
+}
+
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+ struct kernfs_node *parent_kn = kn->parent;
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(kn);
+ if (!rdtgrp) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp);
+ else if (rdtgrp->type == RDTMON_GROUP &&
+ !strcmp(parent_kn->name, "mon_groups"))
+ ret = rdtgroup_rmdir_mon(kn, rdtgrp);
+ else
+ ret = -EPERM;
+
out:
rdtgroup_kn_unlock(kn);
- free_cpumask_var(tmpmask);
return ret;
}
--
1.9.1