[PATCH tip/core/rcu 10/10] rcu: Convert rcu_state.ofl_lock to raw_spinlock_t
From: Paul E. McKenney
Date: Wed Aug 29 2018 - 19:08:14 EST
From: Mike Galbraith <efault@xxxxxx>
1e64b15a4b10 ("rcu: Fix grace-period hangs due to race with CPU offline")
added spinlock_t ofl_lock to the rcu_state structure; this lock is then
acquired with preemption disabled during CPU offline, which gives the
-rt patchset's sleeping spinlocks heartburn.

This commit therefore converts ->ofl_lock to raw_spinlock_t.
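
For illustration only (not part of this patch): on PREEMPT_RT, spinlock_t
is backed by a sleeping rtmutex, while raw_spinlock_t always spins, so a
lock that is acquired inside a preemption-disabled region has to be a
raw_spinlock_t. A minimal sketch of the pattern, using a hypothetical
example_ofl_lock:

#include <linux/preempt.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for rcu_state.ofl_lock. */
static DEFINE_RAW_SPINLOCK(example_ofl_lock);

static void example_offline_path(void)
{
	preempt_disable();			/* CPU-offline code runs non-preemptible. */
	raw_spin_lock(&example_ofl_lock);	/* A raw lock never sleeps, even on -rt. */
	/* ... offline bookkeeping would go here ... */
	raw_spin_unlock(&example_ofl_lock);
	preempt_enable();
}

Note that, as in the patch below, ->ofl_lock is acquired without disabling
interrupts; the nested raw_spin_lock_irq_rcu_node() and
raw_spin_lock_irqsave_rcu_node() calls continue to handle that, exactly as
they did with the old spin_lock() usage.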
Signed-off-by: Mike Galbraith <efault@xxxxxx>
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
kernel/rcu/tree.c | 12 ++++++------
kernel/rcu/tree.h | 2 +-
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 65b6e03ae6f5..0ce4702ce09f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,7 +97,7 @@ struct rcu_state rcu_state = {
 	.abbr = RCU_ABBR,
 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
-	.ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
+	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 };
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1775,13 +1775,13 @@ static bool rcu_gp_init(void)
 	 */
 	rcu_state.gp_state = RCU_GP_ONOFF;
 	rcu_for_each_leaf_node(rnp) {
-		spin_lock(&rcu_state.ofl_lock);
+		raw_spin_lock(&rcu_state.ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
 			raw_spin_unlock_irq_rcu_node(rnp);
-			spin_unlock(&rcu_state.ofl_lock);
+			raw_spin_unlock(&rcu_state.ofl_lock);
 			continue;
 		}
 
@@ -1817,7 +1817,7 @@ static bool rcu_gp_init(void)
 		}
 		raw_spin_unlock_irq_rcu_node(rnp);
-		spin_unlock(&rcu_state.ofl_lock);
+		raw_spin_unlock(&rcu_state.ofl_lock);
 	}
 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
@@ -3376,7 +3376,7 @@ void rcu_report_dead(unsigned int cpu)
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	spin_lock(&rcu_state.ofl_lock);
+	raw_spin_lock(&rcu_state.ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
@@ -3387,7 +3387,7 @@ void rcu_report_dead(unsigned int cpu)
 	}
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	spin_unlock(&rcu_state.ofl_lock);
+	raw_spin_unlock(&rcu_state.ofl_lock);
 
 	per_cpu(rcu_cpu_started, cpu) = 0;
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bfbf97a1c29d..703e19ff532d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -343,7 +343,7 @@ struct rcu_state {
 	const char *name;			/* Name of structure. */
 	char abbr;				/* Abbreviated name. */
 
-	spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
 						/* GP pre-initialization. */
 };
--
2.17.1