[PATCH] perf/x86/amd/uncore: Fix use-after-free due to hotplug conversion
From: Sebastian Andrzej Siewior
Date: Fri Sep 09 2016 - 12:08:33 EST
The conversion changed how the hotplug hooks are invoked: before, each
CPU invoked all of them in turn. Now, on registration, each hook is
invoked on all CPUs (one after the other) before the hook for the next
state is invoked.
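
For illustration only (not part of the patch), a minimal userspace model
of that ordering change; NR_CPUS, NR_STATES, state_name and invoke() are
made-up stand-ins, not kernel symbols:

#include <stdio.h>

#define NR_CPUS		2
#define NR_STATES	3

static const char * const state_name[NR_STATES] = {
	"prepare", "starting", "online"
};

static void invoke(int state, int cpu)
{
	printf("cpu%d: %s\n", cpu, state_name[state]);
}

int main(void)
{
	int cpu, state;

	/* old: one CPU at a time runs all states */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (state = 0; state < NR_STATES; state++)
			invoke(state, cpu);

	/* new: each state runs on all CPUs before the next state */
	for (state = 0; state < NR_STATES; state++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			invoke(state, cpu);

	return 0;
}

Under the new ordering every CPU has completed the prepare step (struct
allocated, id still 0) before any CPU runs the starting step, which is
what makes the stale-id matches described below possible.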
In amd_uncore_cpu_up_prepare() we allocate an uncore struct for each
CPU; since it is zero-allocated, its id is 0. In
amd_uncore_cpu_starting() we set the proper id and search the other
online CPUs for a struct with the same id to replace ours, if one is
available. CPU0 finds another struct still carrying id 0 and puts it on
the kill list. CPU1 then finds the struct of CPU0 and puts it on the
kill list as well.
The online callback frees them, and since a struct on the kill list may
still be in use, this results in a use after free.
To fix this, step one is to initialize the id to -1, assuming that no
uncore with such an id will ever show up. Step two is to use a list to
collect and free the elements that are no longer needed.
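
For illustration only, a minimal userspace sketch of both parts of the
fix, with a plain singly linked list standing in for the kernel's hlist;
uncore_alloc() and uncore_park() are made-up helpers, not the patch's
functions:

#include <stdlib.h>

struct uncore {
	int id;
	struct uncore *next;	/* stands in for the patch's hlist_node */
};

static struct uncore *unused_list;

static struct uncore *uncore_alloc(void)
{
	struct uncore *u = calloc(1, sizeof(*u));

	if (!u)
		exit(1);
	u->id = -1;	/* sentinel: never matches a real uncore id */
	return u;
}

static void uncore_park(struct uncore *u)
{
	/* hlist_add_head(&u->node, &uncore_unused_list) in the patch */
	u->next = unused_list;
	unused_list = u;
}

static void uncore_clean_online(void)
{
	/* hlist_for_each_entry_safe() + hlist_del() + kfree() in the patch */
	while (unused_list) {
		struct uncore *u = unused_list;

		unused_list = u->next;
		free(u);
	}
}

int main(void)
{
	uncore_park(uncore_alloc());
	uncore_park(uncore_alloc());
	uncore_clean_online();	/* each parked struct is freed exactly once */
	return 0;
}

Draining the list in the online callback mirrors the original design of
deferring the kfree() until the CPU is online, but each struct now sits
on the list exactly once and is freed exactly once.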
Reported-by: Eric Sandeen <sandeen@xxxxxxxxxxx>
Tested-by: Eric Sandeen <sandeen@xxxxxxxxxxx>
Fixes: 96b2bd3866a0 ("perf/x86/amd/uncore: Convert to hotplug state machine")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
arch/x86/events/amd/uncore.c | 22 ++++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e6131d4454e6..65577f081d07 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -29,6 +29,8 @@
#define COUNTER_SHIFT 16
+static HLIST_HEAD(uncore_unused_list);
+
struct amd_uncore {
int id;
int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
cpumask_t *active_mask;
struct pmu *pmu;
struct perf_event *events[MAX_COUNTERS];
- struct amd_uncore *free_when_cpu_online;
+ struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu;
+ uncore_nb->id = -1;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_l2->active_mask = &amd_l2_active_mask;
uncore_l2->pmu = &amd_l2_pmu;
+ uncore_l2->id = -1;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
}
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
continue;
if (this->id == that->id) {
- that->free_when_cpu_online = this;
+ hlist_add_head(&this->node, &uncore_unused_list);
this = that;
break;
}
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
return 0;
}
+static void uncore_clean_online(void)
+{
+ struct amd_uncore *uncore;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+ hlist_del(&uncore->node);
+ kfree(uncore);
+ }
+}
+
static void uncore_online(unsigned int cpu,
struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
- kfree(uncore->free_when_cpu_online);
- uncore->free_when_cpu_online = NULL;
+ uncore_clean_online();
if (cpu == uncore->cpu)
cpumask_set_cpu(cpu, uncore->active_mask);
--
2.9.3