Re: [PATCH 2/2] x86/intel_rdt: Update task closid immediately on CPU in rmdir and unmount

From: Thomas Gleixner
Date: Wed Nov 23 2016 - 09:28:23 EST


On Fri, 18 Nov 2016, Fenghua Yu wrote:
>
> When removing a sub directory/rdtgroup by rmdir or umount, the closid of
> each task in the sub directory is set to the default rdtgroup's closid,
> which is 0. If such a task is running on a CPU, the PQR_ASSOC MSR is only
> updated on the next context switch. Until then the task runs with the
> wrong closid.
>
> Make the change immediately effective by invoking an smp function call
> on all online CPUs which calls intel_rdt_sched_in() to update the
> PQR_ASSOC MSR.
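
For reference, the "smp function call on all online CPUs" described above
boils down to something like this (just a sketch; the wrapper name is made
up and not part of the patch):

static void rdt_sched_in_ipi(void *unused)
{
	/* Rewrites PQR_ASSOC from the already updated task/percpu closid */
	intel_rdt_sched_in();
}

	/* Broadcast to every online CPU, affected or not */
	on_each_cpu(rdt_sched_in_ipi, NULL, 1);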

We can be smarter than that and figure out which cpus need that update
rather than interrupting the world for nothing.

> rdt_update_closid() (renamed from rdt_update_percpu_closid()) calls
> intel_rdt_sched_in() to update the closid in the PQR_ASSOC MSR on a CPU.
> The task closid and percpu closid are set up before
> rdt_update_closid() is called. Handling the PQR_ASSOC MSR update for
> both the task closid and the percpu closid in rdt_update_closid()
> reduces redundant smp function calls.

You should not describe WHAT the patch is doing; the WHY and the concept are
what need to be in the changelog. The WHAT is in the patch.

> @@ -224,7 +226,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
> {
> cpumask_var_t tmpmask, newmask;
> struct rdtgroup *rdtgrp, *r;
> - int ret;
> + int cpu, ret;
>
> if (!buf)
> return -EINVAL;
> @@ -264,7 +266,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
> /* Give any dropped cpus to rdtgroup_default */
> cpumask_or(&rdtgroup_default.cpu_mask,
> &rdtgroup_default.cpu_mask, tmpmask);
> - rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
> + for_each_cpu(cpu, tmpmask)
> + per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;
> + rdt_update_closid(tmpmask);

Bah, this is silly. We can make that conditional instead of mindlessly
slapping a for_each_cpu() loop into every other function.

static void rdt_update_cpu_closid(void *closid)
{
	if (closid)
		this_cpu_write(cpu_closid, *(int *)closid);
	....
}

and have:

static void rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_update_cpu_closid(closid);
	smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
	put_cpu();
}

So the change here becomes:

- rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
+ rdt_update_closid(tmpmask, &rdtgroup_default.closid);

and for the case where we need to deal with tasks, do the task move, the
percpu update and call rdt_update_closid(mask, NULL);
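
IOW, roughly this sequence (which is what the reworked rmdir path below ends
up doing):

	/* Move the group's tasks and record the CPUs they are running on */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Fix up the per cpu closids of the group's own CPUs directly */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;

	/* One smp function call covers moved CPUs and moved tasks alike */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	rdt_update_closid(tmpmask, NULL);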

> +static int rdt_move_task_closid(struct rdtgroup *from, struct rdtgroup *to)

Bah, consistent types are optional, right? int != bool

> +{

So the right thing to do here, aside from using a proper function name, is:

static bool rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;
	bool moved_task = false;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || t->closid == from->closid) {
			t->closid = to->closid;
#ifdef CONFIG_SMP
			if (mask && t->on_cpu) {
				moved_task = true;
				cpumask_set_cpu(task_cpu(t), mask);
			}
#endif
		}
	}
	read_unlock(&tasklist_lock);

	return moved_task;
}

> /*
> * Forcibly remove all of subdirectories under root.
> */
> static void rmdir_all_sub(void)
> {
> struct rdtgroup *rdtgrp, *tmp;
> - struct task_struct *p, *t;
> + int cpu;
>
> /* move all tasks to default resource group */
> - read_lock(&tasklist_lock);
> - for_each_process_thread(p, t)
> - t->closid = 0;
> - read_unlock(&tasklist_lock);
> + rdt_move_task_closid(NULL, &rdtgroup_default);

So this becomes

rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

>
> list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
> /* Remove each rdtgroup other than root */
> @@ -833,13 +853,18 @@ static void rmdir_all_sub(void)
> cpumask_or(&rdtgroup_default.cpu_mask,
> &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
>
> - rdt_update_percpu_closid(&rdtgrp->cpu_mask,
> - rdtgroup_default.closid);
> + for_each_cpu(cpu, &rdtgrp->cpu_mask)
> + per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;

And this is pointless when we have the closid pointer in rdt_update_closid()
and the below becomes:

> + rdt_update_closid(cpu_online_mask);

rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);

> + put_online_cpus();

Reworked untested patch below.

Thanks,

tglx

8<-----------------
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -194,12 +194,13 @@ static int rdtgroup_cpus_show(struct ker
/*
* This is safe against intel_rdt_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
- * from rdt_update_percpu_closid() is proteced against __switch_to() because
+ * from rdt_update_closid() is protected against __switch_to() because
* preemption is disabled.
*/
-static void rdt_update_cpu_closid(void *v)
+static void rdt_update_cpu_closid(void *closid)
{
- this_cpu_write(cpu_closid, *(int *)v);
+ if (closid)
+ this_cpu_write(cpu_closid, *(int *)closid);
/*
* We cannot unconditionally write the MSR because the current
* executing task might have its own closid selected. Just reuse
@@ -208,14 +209,23 @@ static void rdt_update_cpu_closid(void *
intel_rdt_sched_in();
}

-/* Update the per cpu closid and eventually the PGR_ASSOC MSR */
-static void rdt_update_percpu_closid(const struct cpumask *cpu_mask, int closid)
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
{
int cpu = get_cpu();

if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_update_cpu_closid(&closid);
- smp_call_function_many(cpu_mask, rdt_update_cpu_closid, &closid, 1);
+ rdt_update_cpu_closid(closid);
+ smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
put_cpu();
}

@@ -264,7 +274,7 @@ static ssize_t rdtgroup_cpus_write(struc
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, tmpmask);
- rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
+ rdt_update_closid(tmpmask, &rdtgroup_default.closid);
}

/*
@@ -278,7 +288,7 @@ static ssize_t rdtgroup_cpus_write(struc
continue;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
}
- rdt_update_percpu_closid(tmpmask, rdtgrp->closid);
+ rdt_update_closid(tmpmask, &rdtgrp->closid);
}

/* Done pushing/pulling - update this group with new mask */
@@ -807,18 +817,49 @@ static int reset_all_cbms(struct rdt_res
}

/*
- * Forcibly remove all of subdirectories under root.
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
*/
-static void rmdir_all_sub(void)
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ struct cpumask *mask)
{
- struct rdtgroup *rdtgrp, *tmp;
struct task_struct *p, *t;

- /* move all tasks to default resource group */
read_lock(&tasklist_lock);
- for_each_process_thread(p, t)
- t->closid = 0;
+ for_each_process_thread(p, t) {
+ if (!from || t->closid == from->closid) {
+ t->closid = to->closid;
+#ifdef CONFIG_SMP
+ /*
+ * This is safe on x86 w/o barriers as the ordering
+ * of writing to task_cpu() and t->on_cpu is
+ * reverse to the reading here. The detection is
+ * inaccurate as tasks might move or schedule
+ * before the smp function call takes place. In
+ * such a case the function call is pointless, but
+ * there is no other side effect.
+ */
+ if (mask && t->on_cpu)
+ cpumask_set_cpu(task_cpu(t), mask);
+#endif
+ }
+ }
read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all of subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+ struct rdtgroup *rdtgrp, *tmp;
+
+ /* Move all tasks to the default resource group */
+ rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
/* Remove each rdtgroup other than root */
@@ -833,13 +874,15 @@ static void rmdir_all_sub(void)
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

- rdt_update_percpu_closid(&rdtgrp->cpu_mask,
- rdtgroup_default.closid);
-
kernfs_remove(rdtgrp->kn);
list_del(&rdtgrp->rdtgroup_list);
kfree(rdtgrp);
}
+ /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+ get_online_cpus();
+ rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+ put_online_cpus();
+
kernfs_remove(kn_info);
}

@@ -944,27 +987,35 @@ static int rdtgroup_mkdir(struct kernfs_

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
- struct task_struct *p, *t;
+ int ret, cpu, closid = rdtgroup_default.closid;
struct rdtgroup *rdtgrp;
+ cpumask_var_t tmpmask;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;

rdtgrp = rdtgroup_kn_lock_live(kn);
if (!rdtgrp) {
- rdtgroup_kn_unlock(kn);
- return -EPERM;
+ ret = -EPERM;
+ goto out;
}

/* Give any tasks back to the default group */
- read_lock(&tasklist_lock);
- for_each_process_thread(p, t) {
- if (t->closid == rdtgrp->closid)
- t->closid = 0;
- }
- read_unlock(&tasklist_lock);
+ rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

/* Give any CPUs back to the default group */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
- rdt_update_percpu_closid(&rdtgrp->cpu_mask, rdtgroup_default.closid);
+
+ /* Update per cpu closid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_closid, cpu) = closid;
+ /*
+ * Update the MSR on moved CPUs and CPUs which have moved
+ * tasks running on them.
+ */
+ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+ rdt_update_closid(tmpmask, NULL);

rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
@@ -976,9 +1027,11 @@ static int rdtgroup_rmdir(struct kernfs_
*/
kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
-
+ ret = 0;
+out:
rdtgroup_kn_unlock(kn);
- return 0;
+ free_cpumask_var(tmpmask);
+ return ret;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {