diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f732642e3e09..1d7bc87007cd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -290,6 +290,10 @@ static void sched_core_assert_empty(void)
 static void __sched_core_enable(void)
 {
 	static_branch_enable(&__sched_core_enabled);
+	/*
+	 * Ensure raw_spin_rq_*lock*() have completed before flipping.
+	 */
+	synchronize_rcu();
 	__sched_core_flip(true);
 	sched_core_assert_empty();
 }
@@ -449,16 +453,23 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
 	raw_spinlock_t *lock;
 
+	/* Matches synchronize_rcu() in __sched_core_enable() */
+	preempt_disable();
 	if (sched_core_disabled()) {
 		raw_spin_lock_nested(&rq->__lock, subclass);
+		/* preempt-count *MUST* be > 1 */
+		preempt_enable_no_resched();
 		return;
 	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		raw_spin_lock_nested(lock, subclass);
-		if (likely(lock == __rq_lockp(rq)))
+		if (likely(lock == __rq_lockp(rq))) {
+			/* preempt-count *MUST* be > 1 */
+			preempt_enable_no_resched();
 			return;
+		}
 		raw_spin_unlock(lock);
 	}
 }
@@ -468,14 +479,21 @@ bool raw_spin_rq_trylock(struct rq *rq)
 {
 	raw_spinlock_t *lock;
 	bool ret;
-	if (sched_core_disabled())
-		return raw_spin_trylock(&rq->__lock);
+	/* Matches synchronize_rcu() in __sched_core_enable() */
+	preempt_disable();
+	if (sched_core_disabled()) {
+		ret = raw_spin_trylock(&rq->__lock);
+		preempt_enable();
+		return ret;
+	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		ret = raw_spin_trylock(lock);
-		if (!ret || (likely(lock == __rq_lockp(rq))))
+		if (!ret || (likely(lock == __rq_lockp(rq)))) {
+			preempt_enable();
 			return ret;
+		}
 		raw_spin_unlock(lock);
 	}
 }
@@ -493,14 +511,17 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
 	lockdep_assert_irqs_disabled();
 
-	if (rq1->cpu > rq2->cpu)
-		swap(rq1, rq2);
-
-	raw_spin_rq_lock(rq1);
-	if (__rq_lockp(rq1) == __rq_lockp(rq2))
-		return;
-
-	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+	if (__rq_lockp(rq1) == __rq_lockp(rq2)) {
+		raw_spin_rq_lock(rq1);
+	} else {
+		if (__rq_lockp(rq1) < __rq_lockp(rq2)) {
+			raw_spin_rq_lock(rq1);
+			raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+		} else {
+			raw_spin_rq_lock(rq2);
+			raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
+		}
+	}
 }
 
 #endif