[patch 1/2] rtmutex: Handle deadlock detection smarter

From: Thomas Gleixner
Date: Thu Jun 05 2014 - 11:28:54 EST


Even when deadlock detection is not requested by the caller, we can
detect deadlocks. Right now the code stops the lock chain walk and
keeps the waiter enqueued, even on itself. It is silly not to yell
when such a scenario is detected and to keep the waiter enqueued.

Return -EDEADLK unconditionally and handle it at the call sites.

The futex calls return -EDEADLK. The non-futex ones dequeue the
waiter, emit a warning and put the task into a schedule loop.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
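Not part of the patch, just an illustration for reviewers: a minimal
userspace sketch, assuming stock FUTEX_LOCK_PI semantics, of the
-EDEADLK userspace gets back when it tries to PI-lock a futex it
already owns. The file name and output format are made up for the
example, and it pokes the futex fast path rather than the rtmutex
chain walk changed below.

/* deadlk_demo.c - hypothetical example, not part of the patch */
#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* A PI futex word carries the owner's TID; storing our own TID
	 * pretends this thread already holds the lock. */
	unsigned int futex_word = (unsigned int)syscall(SYS_gettid);

	/* Locking it again from the same thread should fail with
	 * EDEADLK, as the kernel spots that we are already the owner. */
	long ret = syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0,
			   NULL, NULL, 0);

	printf("FUTEX_LOCK_PI: ret=%ld errno=%d (%s)\n",
	       ret, errno, ret ? strerror(errno) : "acquired");
	return 0;
}

Compiled and run on a PI-futex capable kernel, errno should come back
as EDEADLK ("Resource deadlock avoided").
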
kernel/locking/rtmutex.c | 33 ++++++++++++++++++++++++++++-----
kernel/locking/rtmutex.h | 6 +++++-
2 files changed, 33 insertions(+), 6 deletions(-)

Index: tip/kernel/locking/rtmutex.c
===================================================================
--- tip.orig/kernel/locking/rtmutex.c
+++ tip/kernel/locking/rtmutex.c
@@ -314,7 +314,7 @@ static int rt_mutex_adjust_prio_chain(st
 		}
 		put_task_struct(task);

-		return deadlock_detect ? -EDEADLK : 0;
+		return -EDEADLK;
 	}
  retry:
 	/*
@@ -377,7 +377,7 @@ static int rt_mutex_adjust_prio_chain(st
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
-		ret = deadlock_detect ? -EDEADLK : 0;
+		ret = -EDEADLK;
 		goto out_unlock_pi;
 	}

@@ -548,7 +548,7 @@ static int task_blocks_on_rt_mutex(struc
 	 * which is wrong, as the other waiter is not in a deadlock
 	 * situation.
 	 */
-	if (detect_deadlock && owner == task)
+	if (owner == task)
 		return -EDEADLK;

 	raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -763,6 +763,26 @@ __rt_mutex_slowlock(struct rt_mutex *loc
 	return ret;
 }

+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+				     struct rt_mutex_waiter *w)
+{
+	/*
+	 * If the result is not -EDEADLOCK or the caller requested
+	 * deadlock detection, nothing to do here.
+	 */
+	if (res != -EDEADLOCK || detect_deadlock)
+		return;
+
+	/*
+	 * Yell loudly and stop the task right here.
+	 */
+	debug_rt_mutex_print_deadlock(w);
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +822,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,

 	set_current_state(TASK_RUNNING);

-	if (unlikely(ret))
+	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
+		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+	}

 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -1112,7 +1134,8 @@ int rt_mutex_start_proxy_lock(struct rt_
 		return 1;
 	}

-	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+	/* We enforce deadlock detection for futexes */
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

 	if (ret && !rt_mutex_owner(lock)) {
 		/*
Index: tip/kernel/locking/rtmutex.h
===================================================================
--- tip.orig/kernel/locking/rtmutex.h
+++ tip/kernel/locking/rtmutex.h
@@ -21,6 +21,10 @@
 #define debug_rt_mutex_unlock(l)		do { } while (0)
 #define debug_rt_mutex_init(m, n)		do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)	do { } while (0)
-#define debug_rt_mutex_print_deadlock(w)	do { } while (0)
 #define debug_rt_mutex_detect_deadlock(w,d)	(d)
 #define debug_rt_mutex_reset_waiter(w)		do { } while (0)
+
+static inline void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+	WARN(1, "rtmutex deadlock detected\n");
+}

