[PATCH v2 01/17] locking/lockdep: Add lock type enum to explicitly specify read or write locks

From: Yuyang Du
Date: Thu May 16 2019 - 04:02:24 EST


Add an enum to formalize lock types, as the actual type values now matter.

To ensure that:

(LOCK_TYPE_READ & LOCK_TYPE_RECURSIVE) != 0

the recursive-read type is changed from its current value of 2 to 3.
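
For illustration only (not part of this patch), the intended bit
relation can be sanity-checked with compile-time assertions; two lock
types are exclusive iff their bitwise AND is 0:

	enum lock_type {
		LOCK_TYPE_EXCLUSIVE	= 0,
		LOCK_TYPE_WRITE		= 0,
		LOCK_TYPE_READ,		/* 1 */
		LOCK_TYPE_UNUSED,	/* 2: placeholder only */
		LOCK_TYPE_RECURSIVE,	/* 3 */
	};

	/* read vs. recursive-read must NOT be exclusive: */
	_Static_assert((LOCK_TYPE_READ & LOCK_TYPE_RECURSIVE) != 0, "");
	/* read vs. write and write vs. write must be exclusive: */
	_Static_assert((LOCK_TYPE_READ & LOCK_TYPE_WRITE) == 0, "");
	_Static_assert((LOCK_TYPE_WRITE & LOCK_TYPE_WRITE) == 0, "");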

Signed-off-by: Yuyang Du <duyuyang@xxxxxxxxx>
---
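(Note for reviewers, dropped by git am: a sketch of how the common
acquire wrappers expand with the enum in place; the callers named are
the typical users of each wrapper and are listed only for illustration.)

	/* spin_acquire(): exclusive (write) acquisition */
	lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, NULL, i);

	/* rwsem_acquire_read(), e.g. down_read(): non-recursive read */
	lock_acquire(l, s, t, LOCK_TYPE_READ, 1, NULL, i);

	/* rcu_lock_acquire(), i.e. rcu_read_lock(): recursive read */
	lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
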
include/linux/lockdep.h | 45 +++++++++++++++++++++++++++++++++++++++------
include/linux/rcupdate.h | 2 +-
kernel/locking/lockdep.c | 23 +++++++++++++----------
3 files changed, 53 insertions(+), 17 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 7c2fefa..1009c47 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -339,10 +339,38 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
*
* Values for "read":
*
- * 0: exclusive (write) acquire
- * 1: read-acquire (no recursion allowed)
- * 2: read-acquire with same-instance recursion allowed
+ * LOCK_TYPE_EXCLUSIVE (LOCK_TYPE_WRITE): exclusive (write) acquire
+ * LOCK_TYPE_READ: read-acquire (no recursion allowed)
+ * LOCK_TYPE_RECURSIVE: read-acquire with same-instance recursion allowed
*
+ * Acquisitions of the same lock by different tasks have the following
+ * exclusiveness (the read type includes recursive read):
+ *
+ * read and read (no)
+ * read and write (yes)
+ * write and write (yes)
+ *
+ * The enum values for the types are arranged such that:
+ *
+ * if type1 and type2 are exclusive, then (type1 & type2) == 0.
+ *
+ * There is a tricky situation concerning read vs. recursive-read: there
+ * can be lock classes whose locks are acquired as both read locks and
+ * recursive-read locks, for example:
+ *
+ * kernel/locking/qrwlock.c:queued_read_lock_slowpath()
+ *
+ * The values of lock types are defined as follows:
+ */
+enum lock_type {
+ LOCK_TYPE_EXCLUSIVE = 0,
+ LOCK_TYPE_WRITE = 0,
+ LOCK_TYPE_READ,
+ LOCK_TYPE_UNUSED, /* placeholder so that LOCK_TYPE_RECURSIVE == 3 */
+ LOCK_TYPE_RECURSIVE,
+};
+
+/*
* Values for check:
*
* 0: simple checks (freeing, held-at-exit-time, etc.)
@@ -588,9 +616,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
* on the per lock-class debug mode:
*/

-#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
+#define lock_acquire_exclusive(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, n, i)
+
+#define lock_acquire_shared(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_READ, 1, n, i)
+
+#define lock_acquire_shared_recursive(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_RECURSIVE, 1, n, i)

#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 922bb68..22b41b1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -207,7 +207,7 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
- lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+ lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7275d6c..fb6be63 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2297,7 +2297,10 @@ static inline void inc_chains(void)
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns:
+ * 0: deadlock detected;
+ * 1: OK;
+ * LOCK_TYPE_RECURSIVE (3): recursive read
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
@@ -2319,15 +2322,15 @@ static inline void inc_chains(void)
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
- if ((next->read == 2) && prev->read)
- return 2;
+ if ((next->read == LOCK_TYPE_RECURSIVE) && prev->read)
+ return LOCK_TYPE_RECURSIVE;

/*
* We're holding the nest_lock, which serializes this lock's
* nesting behaviour.
*/
if (nest)
- return 2;
+ return LOCK_TYPE_RECURSIVE;

print_deadlock_bug(curr, prev, next);
return 0;
@@ -2407,7 +2410,7 @@ static inline void inc_chains(void)
* write-lock never takes any other locks, then the reads are
* equivalent to a NOP.
*/
- if (next->read == 2 || prev->read == 2)
+ if (next->read == LOCK_TYPE_RECURSIVE || prev->read == LOCK_TYPE_RECURSIVE)
return 1;
/*
* Is the <prev> -> <next> dependency already present?
@@ -2493,7 +2496,7 @@ static inline void inc_chains(void)
* Only non-recursive-read entries get new dependencies
* added:
*/
- if (hlock->read != 2 && hlock->check) {
+ if (hlock->read != LOCK_TYPE_RECURSIVE && hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance,
&trace);
if (!ret)
@@ -2877,13 +2880,13 @@ static int validate_chain(struct task_struct *curr,
* building dependencies (just like we jump over
* trylock entries):
*/
- if (ret == 2)
- hlock->read = 2;
+ if (ret == LOCK_TYPE_RECURSIVE)
+ hlock->read = LOCK_TYPE_RECURSIVE;
/*
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
- if (!chain_head && ret != 2) {
+ if (!chain_head && ret != LOCK_TYPE_RECURSIVE) {
if (!check_prevs_add(curr, hlock))
return 0;
}
@@ -4105,7 +4108,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
curr->curr_chain_key = hlock->prev_chain_key;

WARN(hlock->read, "downgrading a read lock");
- hlock->read = 1;
+ hlock->read = LOCK_TYPE_READ;
hlock->acquire_ip = ip;

if (reacquire_held_locks(curr, depth, i))
--
1.8.3.1