[RFC][PATCH v2 4/5] mutex: Disable preemption between modifying lock->owner and locking/unlocking mutex
From: Jason Low
Date: Tue Jan 28 2014 - 14:14:04 EST
This RFC patch disables preemption between modifying lock->owner and
locking/unlocking the mutex lock. This prevents situations where the owner
can be preempted between those 2 operations, which would leave optimistic
spinners unable to determine whether lock->owner is running on a CPU. As
mentioned in the thread for this v1 patchset, disabling preemption is a
cheap operation.
Signed-off-by: Jason Low <jason.low2@xxxxxx>
---
kernel/locking/mutex.c | 30 ++++++++++++++++++++++++++++--
1 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 6d85b08..cfaaf53 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -98,8 +98,10 @@ void __sched mutex_lock(struct mutex *lock)
* The locking fastpath is the 1->0 transition from
* 'unlocked' into 'locked' state.
*/
+ preempt_disable();
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
mutex_set_owner(lock);
+ preempt_enable();
}
EXPORT_SYMBOL(mutex_lock);
@@ -253,9 +255,13 @@ void __sched mutex_unlock(struct mutex *lock)
* the slow path will always be taken, and that clears the owner field
* after verifying that it was indeed current.
*/
+ preempt_disable();
mutex_clear_owner(lock);
#endif
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+#ifndef CONFIG_DEBUG_MUTEXES
+ preempt_enable();
+#endif
}
EXPORT_SYMBOL(mutex_unlock);
@@ -292,9 +298,13 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
* the slow path will always be taken, and that clears the owner field
* after verifying that it was indeed current.
*/
+ preempt_disable();
mutex_clear_owner(&lock->base);
#endif
__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+#ifndef CONFIG_DEBUG_MUTEXES
+ preempt_enable();
+#endif
}
EXPORT_SYMBOL(ww_mutex_unlock);
@@ -780,12 +790,16 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
int ret;
might_sleep();
+ preempt_disable();
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);
+ preempt_enable();
return 0;
- } else
+ } else {
+ preempt_enable();
return __mutex_lock_interruptible_slowpath(lock);
+ }
}
EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -795,12 +809,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
int ret;
might_sleep();
+ preempt_disable();
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);
+ preempt_enable();
return 0;
- } else
+ } else {
+ preempt_enable();
return __mutex_lock_killable_slowpath(lock);
+ }
}
EXPORT_SYMBOL(mutex_lock_killable);
@@ -889,9 +907,11 @@ int __sched mutex_trylock(struct mutex *lock)
{
int ret;
+ preempt_disable();
ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
if (ret)
mutex_set_owner(lock);
+ preempt_enable();
return ret;
}
@@ -904,6 +924,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
int ret;
might_sleep();
+ preempt_disable();
ret = __mutex_fastpath_lock_retval(&lock->base.count);
@@ -912,6 +933,8 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_slowpath(lock, ctx);
+
+ preempt_enable();
return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);
@@ -922,6 +945,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
int ret;
might_sleep();
+ preempt_disable();
ret = __mutex_fastpath_lock_retval(&lock->base.count);
@@ -930,6 +954,8 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
+
+ preempt_enable();
return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
--
1.7.1
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/