Re: [RFC][PATCH RT] rtmutex: Use raw_spin_trylock() in rt_mutex_slowlock() to ease possible live locks

From: Steven Rostedt
Date: Wed Dec 19 2012 - 22:43:55 EST


On Wed, 2012-12-19 at 20:31 -0500, Steven Rostedt wrote:

>
> This is just an RFC patch to start discussion, not for inclusion. I may
> send another patch that implements #2 above.


Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>

Index: linux-rt.git/kernel/rtmutex.c
===================================================================
--- linux-rt.git.orig/kernel/rtmutex.c
+++ linux-rt.git/kernel/rtmutex.c
@@ -167,6 +167,39 @@ static void rt_mutex_wake_waiter(struct
  */
 int max_lock_depth = 1024;
 
+static bool test_lock_waiter(struct task_struct *task,
+			     struct rt_mutex_waiter *waiter,
+			     struct rt_mutex_waiter *top_waiter,
+			     struct rt_mutex *orig_lock,
+			     struct rt_mutex_waiter *orig_waiter,
+			     int detect_deadlock)
+{
+	/*
+	 * Check the orig_waiter state. After we dropped the locks,
+	 * the previous owner of the lock might have released the lock.
+	 */
+	if (orig_waiter && !rt_mutex_owner(orig_lock))
+		return false;
+
+	/*
+	 * Drop out, when the task has no waiters. Note,
+	 * top_waiter can be NULL, when we are in the deboosting
+	 * mode!
+	 */
+	if (top_waiter && (!task_has_pi_waiters(task) ||
+			   top_waiter != task_top_pi_waiter(task)))
+		return false;
+
+	/*
+	 * When deadlock detection is off then we check, if further
+	 * priority adjustment is necessary.
+	 */
+	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+		return false;
+
+	return true;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
@@ -225,34 +258,32 @@ static int rt_mutex_adjust_prio_chain(st
 	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
-	/*
-	 * Check the orig_waiter state. After we dropped the locks,
-	 * the previous owner of the lock might have released the lock.
-	 */
-	if (orig_waiter && !rt_mutex_owner(orig_lock))
-		goto out_unlock_pi;
-
-	/*
-	 * Drop out, when the task has no waiters. Note,
-	 * top_waiter can be NULL, when we are in the deboosting
-	 * mode!
-	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
-		goto out_unlock_pi;
-
-	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
-	 */
-	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+	if (!test_lock_waiter(task, waiter, top_waiter,
+			      orig_lock, orig_waiter, detect_deadlock))
 		goto out_unlock_pi;

 	lock = waiter->lock;
 	if (!raw_spin_trylock(&lock->wait_lock)) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-		cpu_relax();
-		goto retry;
+		/*
+		 * As raw_spin_locks are FIFO, we need to avoid being
+		 * starved out by other tasks that may be grabbing
+		 * the wait_lock. Grab both locks in the proper
+		 * order and test if anything changed. If it did
+		 * we need to drop them and try again.
+		 */
+		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+		if (waiter != task->pi_blocked_on ||
+		    lock != waiter->lock ||
+		    !test_lock_waiter(task, waiter, top_waiter,
+				      orig_lock, orig_waiter,
+				      detect_deadlock)) {
+			raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+			raw_spin_unlock(&lock->wait_lock);
+			cpu_relax();
+			goto retry;
+		}
 	}
 
 	/* Deadlock detection */
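
For anyone who just wants the locking pattern without the rtmutex context, below is a minimal userspace sketch of what the second hunk does, written against pthread mutexes instead of raw_spinlocks. The struct, field, and function names are made up for illustration; only the ordering logic mirrors the patch: trylock the wait_lock out of order, and if that fails, drop pi_lock, take both locks in the proper order, then revalidate that nothing changed before continuing (otherwise drop both and go back to retry).

/*
 * Userspace sketch only -- all names below are hypothetical, and the
 * irq-disabling and priority-inheritance details of the real code are
 * deliberately left out.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_waiter;

struct fake_lock {
	pthread_mutex_t wait_lock;
};

struct fake_task {
	pthread_mutex_t pi_lock;
	struct fake_waiter *pi_blocked_on;
};

struct fake_waiter {
	struct fake_lock *lock;
};

/* Stand-in for test_lock_waiter(): recheck whatever the caller relied on. */
static bool waiter_still_valid(struct fake_task *task,
			       struct fake_waiter *waiter)
{
	return task->pi_blocked_on == waiter && waiter->lock != NULL;
}

/*
 * Called with task->pi_lock held. Returns true with both locks held,
 * or false with both locks dropped so the caller can go back to its
 * retry label, mirroring the cpu_relax()/goto retry path above.
 */
static bool acquire_wait_lock(struct fake_task *task,
			      struct fake_waiter *waiter)
{
	struct fake_lock *lock = waiter->lock;

	/* Fast path: out-of-order trylock, like raw_spin_trylock(). */
	if (pthread_mutex_trylock(&lock->wait_lock) == 0)
		return true;

	/* Slow path: drop pi_lock, take both locks in the proper order. */
	pthread_mutex_unlock(&task->pi_lock);
	pthread_mutex_lock(&lock->wait_lock);
	pthread_mutex_lock(&task->pi_lock);

	/* The world may have changed while we held neither lock. */
	if (task->pi_blocked_on != waiter || waiter->lock != lock ||
	    !waiter_still_valid(task, waiter)) {
		pthread_mutex_unlock(&task->pi_lock);
		pthread_mutex_unlock(&lock->wait_lock);
		sched_yield();
		return false;
	}

	return true;
}

The point of the slow path is the one the new comment spells out: a plain "drop and goto retry" can be starved indefinitely on a contended FIFO lock, whereas taking both locks in the proper order makes forward progress, and the revalidation step keeps the chain walk correct if the state changed while neither lock was held.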

