--- linux-2.6.24/kernel/rtmutex.c	2008-02-25 15:32:05.000000000 -0800
+++ linux-2.6.24.working/kernel/rtmutex.c	2008-03-12 01:07:45.000000000 -0700
@@ -318,7 +318,7 @@
  * assigned pending owner [which might not have taken the
  * lock yet]:
  */
-static inline int try_to_steal_lock(struct rt_mutex *lock)
+static inline int try_to_steal_lock(struct rt_mutex *lock __DEP_MAP_DECL)
 {
 	struct task_struct *pendowner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *next;
@@ -327,7 +327,7 @@
 		return 0;
 
 	if (pendowner == current)
-		return 1;
+		goto stolen;
 
 	spin_lock(&pendowner->pi_lock);
 	if (current->prio >= pendowner->prio) {
@@ -342,7 +342,7 @@
 	 */
 	if (likely(!rt_mutex_has_waiters(lock))) {
 		spin_unlock(&pendowner->pi_lock);
-		return 1;
+		goto stolen;
 	}
 
 	/* No chain handling, pending owner is not blocked on anything: */
@@ -356,7 +356,7 @@
 	 * enqueued on the pending owners pi_waiters queue. So
 	 * we have to enqueue this waiter into
 	 * current->pi_waiters list. This covers the case,
-	 * where current is boosted because it holds another
+	 * where current is priority boosted because it holds another
 	 * lock and gets unboosted because the booster is
 	 * interrupted, so we would delay a waiter with higher
 	 * priority as current->normal_prio.
@@ -371,6 +371,9 @@
 		__rt_mutex_adjust_prio(current);
 		spin_unlock(&current->pi_lock);
 	}
+
+stolen:
+	lock_note_stolen(DEP_MAP_PARAM);
 	return 1;
 }
 
@@ -383,7 +386,7 @@
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock)
+static int try_to_take_rt_mutex(struct rt_mutex *lock __DEP_MAP_DECL)
 {
 	/*
 	 * We have to be careful here if the atomic speedups are
@@ -406,7 +409,7 @@
 	 */
 	mark_rt_mutex_waiters(lock);
 
-	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
+	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock __DEP_MAP_PARAM))
 		return 0;
 
 	/* We got the lock. */
@@ -631,7 +634,7 @@
 
 static inline void
 rt_spin_lock_fastlock(struct rt_mutex *lock,
-		void fastcall (*slowfn)(struct rt_mutex *lock))
+		void fastcall (*slowfn)(struct rt_mutex *lock __LOCKDEP_DECLS) __LOCKDEP_DECLS)
 {
 	/* Temporary HACK! */
 	if (!current->in_printk)
@@ -643,7 +646,7 @@
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
 		rt_mutex_deadlock_account_lock(lock, current);
 	else
-		slowfn(lock);
+		slowfn(lock __LOCKDEP_PARAMS);
 }
 
 static inline void
@@ -672,7 +675,7 @@
  * sleep/wakeup event loops.
  */
 static void fastcall noinline __sched
-rt_spin_lock_slowlock(struct rt_mutex *lock)
+rt_spin_lock_slowlock(struct rt_mutex *lock __LOCKDEP_DECLS)
 {
 	struct rt_mutex_waiter waiter;
 	unsigned long saved_state, state, flags;
@@ -680,13 +683,15 @@
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
 
+	lock_note_contended(LOCKDEP_PARAMS);
+
 	spin_lock_irqsave(&lock->wait_lock, flags);
 	init_lists(lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock)) {
+	if (try_to_take_rt_mutex(lock __DEP_MAP_PARAM)) {
 		spin_unlock_irqrestore(&lock->wait_lock, flags);
-		return;
+		goto acquired;
 	}
 
 	BUG_ON(rt_mutex_owner(lock) == current);
@@ -707,8 +712,9 @@
 		int saved_lock_depth = current->lock_depth;
 
 		/* Try to acquire the lock */
-		if (try_to_take_rt_mutex(lock))
+		if (try_to_take_rt_mutex(lock __DEP_MAP_PARAM))
 			break;
+
 		/*
 		 * waiter.task is NULL the first time we come here and
 		 * when we have been woken up by the previous owner
@@ -763,6 +769,9 @@
 	spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	debug_rt_mutex_free_waiter(&waiter);
+
+acquired:
+	lock_note_acquired(_dep_map);
 }
 
 /*
@@ -800,9 +809,9 @@
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
-void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock __LOCKDEP_DECLS)
 {
-	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock __LOCKDEP_PARAMS);
 }
 EXPORT_SYMBOL(__rt_spin_lock);
 
@@ -934,7 +943,8 @@
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock)
+		  int detect_deadlock
+		  __LOCKDEP_DECLS)
 {
 	int ret = 0, saved_lock_depth = -1;
 	struct rt_mutex_waiter waiter;
@@ -943,13 +953,16 @@
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
 
+	lock_note_contended(LOCKDEP_PARAMS);
+
 	spin_lock_irqsave(&lock->wait_lock, flags);
 	init_lists(lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock)) {
+	if (try_to_take_rt_mutex(lock __DEP_MAP_PARAM)) {
 		spin_unlock_irqrestore(&lock->wait_lock, flags);
-		return 0;
+		ret = 0;
+		goto acquired;
 	}
 
 	/*
@@ -970,8 +983,9 @@
 		unsigned long saved_flags;
 
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock))
+		if (try_to_take_rt_mutex(lock __DEP_MAP_PARAM)) {
 			break;
+		}
 
 		/*
 		 * TASK_INTERRUPTIBLE checks for signals and
@@ -1060,6 +1074,9 @@
 
 	debug_rt_mutex_free_waiter(&waiter);
 
+acquired:
+	lock_note_acquired(_dep_map);
+
 	return ret;
 }
 
@@ -1067,7 +1084,7 @@
  * Slow path try-lock function:
  */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+rt_mutex_slowtrylock(struct rt_mutex *lock __LOCKDEP_DECLS)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -1078,7 +1095,10 @@
 
 		init_lists(lock);
 
-		ret = try_to_take_rt_mutex(lock);
+		lock_note_contended(LOCKDEP_PARAMS);
+
+		ret = try_to_take_rt_mutex(lock __DEP_MAP_PARAM);
+
 		/*
 		 * try_to_take_rt_mutex() sets the lock waiters
 		 * bit unconditionally. Clean this up.
@@ -1088,6 +1108,9 @@
 	}
 
 	spin_unlock_irqrestore(&lock->wait_lock, flags);
 
+	if (ret)
+		lock_note_acquired(_dep_map);
+
 	return ret;
 }
@@ -1130,13 +1153,13 @@
 		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock))
+				int detect_deadlock __LOCKDEP_DECLS) __LOCKDEP_DECLS)
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock);
+		return slowfn(lock, state, NULL, detect_deadlock __LOCKDEP_PARAMS);
 }
 
 static inline int
@@ -1144,24 +1167,24 @@
 		       struct hrtimer_sleeper *timeout, int detect_deadlock,
 		       int (*slowfn)(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
-				     int detect_deadlock))
+				     int detect_deadlock __LOCKDEP_DECLS) __LOCKDEP_DECLS)
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock);
+		return slowfn(lock, state, timeout, detect_deadlock __LOCKDEP_PARAMS);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock))
+		     int (*slowfn)(struct rt_mutex *lock __LOCKDEP_DECLS) __LOCKDEP_DECLS)
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock);
+	return slowfn(lock __LOCKDEP_PARAMS);
 }
 
 static inline void
@@ -1179,11 +1202,11 @@
  *
  * @lock: the rt_mutex to be locked
  */
-void __sched rt_mutex_lock(struct rt_mutex *lock)
+void __sched rt_mutex_lock(struct rt_mutex *lock __LOCKDEP_DECLS)
 {
 	might_sleep();
 
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock __LOCKDEP_PARAMS);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
@@ -1199,12 +1222,12 @@
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
-					int detect_deadlock)
+					int detect_deadlock __LOCKDEP_DECLS)
 {
 	might_sleep();
 
 	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
-				 detect_deadlock, rt_mutex_slowlock);
+				 detect_deadlock, rt_mutex_slowlock __LOCKDEP_PARAMS);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1230,7 +1253,7 @@
 	might_sleep();
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       detect_deadlock, rt_mutex_slowlock);
+				       detect_deadlock, rt_mutex_slowlock __LOCKDEP_PARAMS_L0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1241,9 +1264,14 @@
 *
 * Returns 1 on success and 0 on contention
 */
+int __rt_mutex_trylock(struct rt_mutex *lock __LOCKDEP_DECLS)
+{
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock __LOCKDEP_PARAMS);
+}
+
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	return __rt_mutex_trylock(lock __LOCKDEP_PARAMS_L0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
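
The __LOCKDEP_* / __DEP_MAP_* macros and the lock_note_*() hooks are
defined elsewhere in this series and are not shown in the hunks above.
For readers without the rest of the series, below is a minimal,
self-contained sketch of the plumbing pattern these call sites imply,
assuming the usual compile-in/compile-out scheme: with the statistics
feature enabled, every lock-path function grows a trailing dep-map
argument, and the "_L0" ("level 0") variants mark the outermost entry
points that introduce that argument from the lock itself. Every name
here (lock_stat_map, fake_lock, demo_lock, CONFIG_LOCK_STAT as the
gate, and the printf bodies of the hooks) is a hypothetical stand-in,
not the series' actual definitions.

/* lockstat_sketch.c - illustrative only, not part of this patch.
 * Build: cc -DCONFIG_LOCK_STAT lockstat_sketch.c
 */
#include <stdio.h>

struct lock_stat_map { const char *name; };	/* hypothetical type */

#ifdef CONFIG_LOCK_STAT
/* Trailing parameter, declared and forwarded with its leading comma: */
#define __LOCKDEP_DECLS		, struct lock_stat_map *_dep_map
#define __LOCKDEP_PARAMS	, _dep_map
/* "Level 0": the outermost caller has no _dep_map, so it supplies one: */
#define __LOCKDEP_PARAMS_L0	, &lock->dep_map
#define __DEP_MAP_DECL		__LOCKDEP_DECLS
#define __DEP_MAP_PARAM		__LOCKDEP_PARAMS
#define LOCKDEP_PARAMS		_dep_map	/* bare form, no comma */
#define lock_note_contended(map)	printf("contended: %s\n", (map)->name)
#define lock_note_acquired(map)		printf("acquired:  %s\n", (map)->name)
#else
/* Feature off: every hook and extra parameter compiles away. */
#define __LOCKDEP_DECLS
#define __LOCKDEP_PARAMS
#define __LOCKDEP_PARAMS_L0
#define __DEP_MAP_DECL
#define __DEP_MAP_PARAM
#define LOCKDEP_PARAMS
#define lock_note_contended(map)	do { } while (0)
#define lock_note_acquired(map)		do { } while (0)
#endif

struct fake_lock { struct lock_stat_map dep_map; };

/* Slow path: receives the map threaded down from its caller,
 * in the same way rt_mutex_slowlock() does above. */
static int slowlock(struct fake_lock *lock __LOCKDEP_DECLS)
{
	lock_note_contended(LOCKDEP_PARAMS);
	/* ... block until the lock is free ... */
	lock_note_acquired(LOCKDEP_PARAMS);
	return 0;
}

/* Public entry point, analogous to rt_mutex_lock(): level 0,
 * so it introduces the dep map itself. */
static int demo_lock(struct fake_lock *lock)
{
	return slowlock(lock __LOCKDEP_PARAMS_L0);
}

int main(void)
{
	struct fake_lock l = { .dep_map = { .name = "demo" } };
	return demo_lock(&l);
}

Folding the leading comma into the macro is what lets one prototype
text serve both configurations: with the feature off, the extra
parameter and every hook vanish and the signatures collapse back to
the stock kernel ones, so the fast paths pay nothing.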