Re: [PATCH v2] locking/rwsem: Add reader-owned state to the owner field
From: Peter Zijlstra
Date: Thu May 19 2016 - 04:33:06 EST
On Thu, May 19, 2016 at 11:37:53AM +1000, Dave Chinner wrote:
> On Fri, May 06, 2016 at 08:20:24PM -0400, Waiman Long wrote:
> > Currently, it is not possible to determine for sure if a reader
> > owns a rwsem by looking at the content of the rwsem data structure.
> > This patch adds a new state RWSEM_READER_OWNED to the owner field
> > to indicate that readers currently own the lock.
>
> Oh, yes please. This will enable us to get rid of the remaining
> mrlock rwsem abstraction we've carried since the days of Irix in
> XFS. The only reason the abstraction still exists is that we track
> write locks for the purposes of checking for correct inode locking
> contexts via ASSERT(xfs_isilocked()) calls....
That all seems to live under a DEBUG knob; lockdep could also easily
tell you these things.
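
Something like the below is what I'd expect the XFS side to reduce to
(a sketch only, not even compile tested; xfs_assert_ilocked() is a
made-up name, and I'm assuming ip->i_lock.mr_lock is the rw_semaphore
inside the current mrlock wrapper):

	/*
	 * Would replace ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)) and
	 * the mr_writer bookkeeping in the mrlock wrapper.
	 */
	static inline void
	xfs_assert_ilocked(struct xfs_inode *ip, uint lock_flags)
	{
		if (lock_flags & XFS_ILOCK_EXCL)
			lockdep_assert_held_exclusive(&ip->i_lock.mr_lock);
		else if (lock_flags & XFS_ILOCK_SHARED)
			lockdep_assert_held_shared(&ip->i_lock.mr_lock);
	}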
---
 include/linux/lockdep.h  | 36 +++++++++++++++++++++++++++++-------
 kernel/locking/lockdep.c | 19 ++++++++++++-------
 2 files changed, 41 insertions(+), 14 deletions(-)
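
To illustrate the intended semantics (not part of the patch; a sketch
assuming a lockdep-enabled build):

	static DECLARE_RWSEM(sem);

	down_write(&sem);
	lockdep_assert_held(&sem);		/* ok, any mode */
	lockdep_assert_held_exclusive(&sem);	/* ok */
	lockdep_assert_held_shared(&sem);	/* WARNs */
	up_write(&sem);

	down_read(&sem);
	lockdep_assert_held_shared(&sem);	/* ok */
	lockdep_assert_held_exclusive(&sem);	/* WARNs */
	up_read(&sem);

Note the mode match is exact, which is why LOCKDEP_HELD_RECURSIVE is a
separate value: a recursive reader (hlock->read == 2) will not satisfy
lockdep_assert_held_shared().
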
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eabe0138eb06..82d6453c4660 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -338,9 +338,21 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
-#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
+#define LOCKDEP_HELD_ANY	(-1)
+#define LOCKDEP_HELD_EXCLUSIVE	(0)
+#define LOCKDEP_HELD_SHARED	(1)
+#define LOCKDEP_HELD_RECURSIVE	(2)
 
-extern int lock_is_held(struct lockdep_map *lock);
+extern int _lock_is_held(struct lockdep_map *lock, int read);
+
+static inline int lock_is_held(struct lockdep_map *lock)
+{
+	return _lock_is_held(lock, LOCKDEP_HELD_ANY);
+}
+
+#define lockdep_is_held(lock)	_lock_is_held(&(lock)->dep_map, LOCKDEP_HELD_ANY)
+#define lockdep_is_held_exclusive(lock)	_lock_is_held(&(lock)->dep_map, LOCKDEP_HELD_EXCLUSIVE)
+#define lockdep_is_held_shared(lock)	_lock_is_held(&(lock)->dep_map, LOCKDEP_HELD_SHARED)
 
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
@@ -369,12 +381,20 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #define lockdep_assert_held(l)	do {				\
-		WARN_ON(debug_locks && !lockdep_is_held(l));	\
-	} while (0)
+	WARN_ON(debug_locks && !lockdep_is_held(l));		\
+} while (0)
 
-#define lockdep_assert_held_once(l)	do {				\
-		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
-	} while (0)
+#define lockdep_assert_held_once(l) do {			\
+	WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
+} while (0)
+
+#define lockdep_assert_held_exclusive(l) do {			\
+	WARN_ON(debug_locks && !lockdep_is_held_exclusive(l));	\
+} while (0)
+
+#define lockdep_assert_held_shared(l) do {			\
+	WARN_ON(debug_locks && !lockdep_is_held_shared(l));	\
+} while (0)
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
@@ -430,6 +450,8 @@ struct lock_class_key { };
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
+#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
+#define lockdep_assert_held_shared(l)		do { (void)(l); } while (0)
 
 #define lockdep_recursing(tsk)			(0)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 81f1a7107c0e..dca5d982b315 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3183,7 +3183,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock);
+static int __lock_is_held(struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3324,7 +3324,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 	chain_key = iterate_chain_key(chain_key, class_idx);
 
-	if (nest_lock && !__lock_is_held(nest_lock))
+	if (nest_lock && !__lock_is_held(nest_lock, LOCKDEP_HELD_ANY))
 		return print_lock_nested_lock_not_held(curr, hlock, ip);
 
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
@@ -3571,7 +3571,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock)
+static int __lock_is_held(struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3579,8 +3579,13 @@ static int __lock_is_held(struct lockdep_map *lock)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		struct held_lock *hlock = curr->held_locks + i;
 
-		if (match_held_lock(hlock, lock))
+		if (!match_held_lock(hlock, lock))
+			continue;
+
+		if (read == LOCKDEP_HELD_ANY)
 			return 1;
+
+		return read == hlock->read;
 	}
 
 	return 0;
@@ -3764,7 +3769,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held(struct lockdep_map *lock)
+int _lock_is_held(struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -3776,13 +3781,13 @@ int lock_is_held(struct lockdep_map *lock)
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	ret = __lock_is_held(lock);
+	ret = __lock_is_held(lock, read);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(lock_is_held);
+EXPORT_SYMBOL_GPL(_lock_is_held);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {