Re: [PATCH] pstore: Revert pmsg_lock back to a normal mutex
From: Steven Rostedt
Date: Thu Mar 02 2023 - 20:01:48 EST
On Thu, 2 Mar 2023 16:56:13 -0500
Steven Rostedt <rostedt@xxxxxxxxxxx> wrote:
> Knowing that rcu_read_lock() keeps the tasks safe, I made the optimization
> to only grab the spinlock (and disable interrupts) once, or whenever the
> top waiter changes.
v3, as I found there were two places that test for the top waiter which had
to be removed:
(I took out the trace_printk() here).
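
In case it helps review, here is the new wait loop in isolation (a condensed
sketch of the two rtmutex_spin_on_owner() hunks below, not separate code; the
owner-changed check at the top is from the surrounding unchanged code):

	/*
	 * Spin as long as the owner is on a CPU and, if we are not the
	 * top waiter, the top waiter's task is too. The top waiter is
	 * cached in last_waiter, so wait_lock is only taken (and
	 * interrupts disabled) when the top waiter changes.
	 */
	rcu_read_lock();	/* keeps owner and top_task task_structs around */
	for (;;) {
		/* If owner changed, trylock again. */
		if (owner != rt_mutex_owner(lock))
			break;
		if (!owner_on_cpu(owner) || need_resched()) {
			res = false;
			break;
		}
		top_waiter = rt_mutex_top_waiter(lock);
		if (top_waiter != waiter) {
			if (top_waiter != last_waiter) {
				/* Top waiter changed; re-read it under wait_lock */
				raw_spin_lock_irq(&lock->wait_lock);
				last_waiter = rt_mutex_top_waiter(lock);
				top_task = last_waiter->task;
				raw_spin_unlock_irq(&lock->wait_lock);
			}
			/* Stop spinning if the top waiter is off CPU */
			if (!owner_on_cpu(top_task)) {
				res = false;
				break;
			}
		}
		cpu_relax();
	}
	rcu_read_unlock();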
-- Steve
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 010cf4e6d0b8..283dd8e654ef 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1399,8 +1399,12 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 				  struct rt_mutex_waiter *waiter,
 				  struct task_struct *owner)
 {
+	struct rt_mutex_waiter *top_waiter;
+	struct rt_mutex_waiter *last_waiter = NULL;
+	struct task_struct *top_task = NULL;
 	bool res = true;
 
+	/* rcu_read_lock keeps task_structs around */
 	rcu_read_lock();
 	for (;;) {
 		/* If owner changed, trylock again. */
@@ -1421,11 +1425,23 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 		 *    for CONFIG_PREEMPT_RCU=y)
 		 *  - the VCPU on which owner runs is preempted
 		 */
-		if (!owner_on_cpu(owner) || need_resched() ||
-		    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
+		if (!owner_on_cpu(owner) || need_resched()) {
 			res = false;
 			break;
 		}
+		top_waiter = rt_mutex_top_waiter(lock);
+		if (top_waiter != waiter) {
+			if (top_waiter != last_waiter) {
+				raw_spin_lock_irq(&lock->wait_lock);
+				last_waiter = rt_mutex_top_waiter(lock);
+				top_task = last_waiter->task;
+				raw_spin_unlock_irq(&lock->wait_lock);
+			}
+			if (!owner_on_cpu(top_task)) {
+				res = false;
+				break;
+			}
+		}
 		cpu_relax();
 	}
 	rcu_read_unlock();
@@ -1547,10 +1563,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			break;
 		}
 
-		if (waiter == rt_mutex_top_waiter(lock))
-			owner = rt_mutex_owner(lock);
-		else
-			owner = NULL;
+		owner = rt_mutex_owner(lock);
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
@@ -1736,10 +1749,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 		if (try_to_take_rt_mutex(lock, current, &waiter))
 			break;
 
-		if (&waiter == rt_mutex_top_waiter(lock))
-			owner = rt_mutex_owner(lock);
-		else
-			owner = NULL;
+		owner = rt_mutex_owner(lock);
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
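
The net effect of the last two hunks: both slowlock paths now always hand the
current owner to rtmutex_spin_on_owner() instead of passing NULL for non-top
waiters, roughly (condensed; the schedule() call is the existing code around
the hunk, not part of this patch):

	owner = rt_mutex_owner(lock);	/* all waiters spin, not just the top one */
	raw_spin_unlock_irq(&lock->wait_lock);

	if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
		schedule();

Whether a non-top waiter keeps spinning is now decided inside
rtmutex_spin_on_owner() by the top_waiter checks above.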