Currently, in the case of num_online_cpus() <= 1, rcu_tasks_rude_wait_gp()
returns directly, which indicates that the current rude RCU-Tasks grace
period has ended, after which the old data may be released. This is not
accurate: on an SMP system, even when num_online_cpus() is equal to one,
another CPU that is in the process of going offline (and has already
invoked __cpu_disable()) may still be within a rude RCU-Tasks read-side
critical section holding a reference to the old data, which leads to
memory corruption.
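For example, the following sequence sketches the failure (the reader on
CPU 1 is hypothetical, for illustration only):

	CPU 0					CPU 1 (going offline)
	-----					---------------------
						// enters rude RCU-Tasks
						// read-side critical section
						__cpu_disable()
						// cleared from cpu_online_mask
	rcu_tasks_rude_wait_gp()
	  num_online_cpus() <= 1, return
	// grace period deemed complete
	caller releases old data
						// still referencing old data
						=> memory corruption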
Therefore, this commit holds cpus_read_lock/unlock() across the
num_online_cpus() check and the dispatching of the per-CPU work items,
so that CPU-hotplug operations are excluded for the full duration of the
grace-period computation.
Signed-off-by: Zqiang <qiang1.zhang@xxxxxxxxx>
---
kernel/rcu/tasks.h | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
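Note on the approach (my reading; not stated in the changelog): the
per-CPU work items are open-coded with schedule_work_on()/flush_work()
rather than keeping schedule_on_each_cpu(), presumably because
schedule_on_each_cpu() itself takes cpus_read_lock() internally and
allocates its work structs with alloc_percpu(), neither of which is
desirable while already holding cpus_read_lock(). A minimal sketch of
the pattern being avoided (hypothetical, for illustration only):

	cpus_read_lock();
	if (num_online_cpus() <= 1)
		goto end;	// Fastpath for only one CPU.
	/* schedule_on_each_cpu() would re-take cpus_read_lock() here
	 * and would also have to allocate per-CPU work structs: */
	schedule_on_each_cpu(rcu_tasks_be_rude);
end:
	cpus_read_unlock();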
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 4a991311be9b..08e72c6462d8 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -1033,14 +1033,30 @@ static void rcu_tasks_be_rude(struct work_struct *work)
{
}
+static DEFINE_PER_CPU(struct work_struct, rude_work);
+
// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
+ int cpu;
+ struct work_struct *work;
+
+ cpus_read_lock();
if (num_online_cpus() <= 1)
- return; // Fastpath for only one CPU.
+	goto end;	// Fastpath for only one CPU.
 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
-	schedule_on_each_cpu(rcu_tasks_be_rude);
+ for_each_online_cpu(cpu) {
+ work = per_cpu_ptr(&rude_work, cpu);
+ INIT_WORK(work, rcu_tasks_be_rude);
+ schedule_work_on(cpu, work);
+ }
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&rude_work, cpu));
+
+end:
+ cpus_read_unlock();
}
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);