[RFC tip/locking v2 02/13] lockdep: Make __bfs() visit every dependency rather than every class until a match

From: Boqun Feng
Date: Wed Sep 06 2017 - 04:31:06 EST


Currently, __bfs() does a breadth-first search in the dependency graph
and visits each lock class in the graph exactly once. For example, in
the following graph:

A ---------> B
|            ^
|            |
+----------> C

a __bfs() call starting at A will visit B through dependency A -> B and
visit C through dependency A -> C, and that's it: the dependency C -> B
is never visited, because B has already been visited.

This is fine for now, as we only have strong dependencies in the
dependency graph: whenever there is a traversable path from A to B in
__bfs(), A has a strong dependency to B (IOW, B depends on A), so there
is no need to visit every dependency in the graph.

However, as we are going to add recursive-read locks into the
dependency graph, not every path will indicate a strong dependency any
more. In the example above, the dependency A -> B may be a weak one
while the path A -> C -> B may be a strong dependency path; if __bfs()
stops visiting B after taking the weak edge A -> B, it misses the
strong path. So __bfs() now needs to visit each dependency in the graph
once (rather than each class once) until it finds a match.
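
To make the problem concrete, here is a self-contained userspace sketch
of the current class-based visiting rule on the graph above (an
illustration only, not kernel code: the array graph, fixed-size queue
and accessed[] flags are simplified stand-ins for the lock_list
entries, the circular queue and lock_class::dep_gen_id). It never
examines the dependency C -> B:

	#include <stdio.h>

	enum { A, B, C, N };

	static const char *name[N] = { "A", "B", "C" };
	/* The example graph: A -> B, A -> C, C -> B; -1 ends a list. */
	static const int out[N][N] = {
		[A] = { B, C, -1 }, [B] = { -1 }, [C] = { B, -1 },
	};

	int main(void)
	{
		int queue[N * N], front = 0, rear = 0;
		int accessed[N] = { 0 };

		accessed[A] = 1;
		queue[rear++] = A;
		while (front < rear) {
			int node = queue[front++];
			const int *t;

			for (t = out[node]; *t >= 0; t++) {
				if (accessed[*t])   /* class already visited,  */
					continue;   /* so C -> B gets skipped */
				accessed[*t] = 1;
				printf("visit %s -> %s\n", name[node], name[*t]);
				queue[rear++] = *t;
			}
		}
		return 0;	/* prints only A -> B and A -> C */
	}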

The solution is simple:

We used to mark lock_class::dep_gen_id (by setting it to the current
lockdep_dependency_gen_id) to indicate that a class had been visited in
__bfs(). Now we change the semantics a little bit: marking a class
indicates that all the dependencies in its locks_{after,before} list
have been visited in this __bfs() search (note that __bfs() only
traverses one direction per search). In this way, each dependency is
guaranteed to be visited until we find a match.
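
And here is the same sketch under the new rule (again hedged and
userspace-only): a node is marked only when its whole dependency list
is about to be scanned, and nodes are enqueued even if they have been
seen before, so every dependency is examined exactly once:

	#include <stdio.h>

	enum { A, B, C, N };

	static const char *name[N] = { "A", "B", "C" };
	/* The same example graph: A -> B, A -> C, C -> B. */
	static const int out[N][N] = {
		[A] = { B, C, -1 }, [B] = { -1 }, [C] = { B, -1 },
	};

	static int gen_id;		/* stands in for lockdep_dependency_gen_id */
	static int dep_gen_id[N];	/* stands in for lock_class::dep_gen_id */

	int main(void)
	{
		int queue[N * N], front = 0, rear = 0;

		gen_id++;		/* a new search generation */
		queue[rear++] = A;
		while (front < rear) {
			int node = queue[front++];
			const int *t;

			/* Skip only if this node's whole list was scanned. */
			if (dep_gen_id[node] == gen_id)
				continue;
			dep_gen_id[node] = gen_id;

			for (t = out[node]; *t >= 0; t++) {
				printf("visit %s -> %s\n", name[node], name[*t]);
				queue[rear++] = *t; /* enqueue even if seen */
			}
		}
		return 0;	/* prints A -> B, A -> C and then C -> B */
	}

Since a class can now be enqueued more than once, the queue may grow
larger than before; in the patch this stays bounded by __cq_enqueue()
failing and __bfs() returning BFS_EQUEUEFULL.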

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
 kernel/locking/lockdep.c | 61 +++++++++++++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 27 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index df0b7e620659..5dbedcc571fd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -960,24 +960,20 @@ static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
 	return (cq->rear - cq->front) & CQ_MASK;
 }
 
-static inline void mark_lock_accessed(struct lock_list *lock,
-				      struct lock_list *parent)
+static inline void mark_lock_list_accessed(struct lock_class *class)
 {
-	unsigned long nr;
+	class->dep_gen_id = lockdep_dependency_gen_id;
+}
 
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+static inline void visit_lock_entry(struct lock_list *lock,
+				    struct lock_list *parent)
+{
 	lock->parent = parent;
-	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
 
-static inline unsigned long lock_accessed(struct lock_list *lock)
+static inline unsigned long lock_list_accessed(struct lock_class *class)
 {
-	unsigned long nr;
-
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
-	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+	return class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
 static inline struct lock_list *get_lock_parent(struct lock_list *child)
@@ -1066,6 +1062,18 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
 			goto exit;
 		}
 
+		/*
+		 * If we have visited all the dependencies from this @lock to
+		 * others (IOW, if we have visited all the lock_list entries
+		 * in @lock->class->locks_{after,before}), we skip it;
+		 * otherwise we go and visit all the dependencies in the
+		 * list and mark the list accessed.
+		 */
+		if (lock_list_accessed(lock->class))
+			continue;
+		else
+			mark_lock_list_accessed(lock->class);
+
 		if (forward)
 			head = &lock->class->locks_after;
 		else
@@ -1074,23 +1082,22 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
 		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 		list_for_each_entry_rcu(entry, head, entry) {
-			if (!lock_accessed(entry)) {
-				unsigned int cq_depth;
-				mark_lock_accessed(entry, lock);
-				if (match(entry, data)) {
-					*target_entry = entry;
-					ret = BFS_RMATCH;
-					goto exit;
-				}
+			unsigned int cq_depth;
 
-				if (__cq_enqueue(cq, (unsigned long)entry)) {
-					ret = BFS_EQUEUEFULL;
-					goto exit;
-				}
-				cq_depth = __cq_get_elem_count(cq);
-				if (max_bfs_queue_depth < cq_depth)
-					max_bfs_queue_depth = cq_depth;
+			visit_lock_entry(entry, lock);
+			if (match(entry, data)) {
+				*target_entry = entry;
+				ret = BFS_RMATCH;
+				goto exit;
+			}
+
+			if (__cq_enqueue(cq, (unsigned long)entry)) {
+				ret = BFS_EQUEUEFULL;
+				goto exit;
 			}
+			cq_depth = __cq_get_elem_count(cq);
+			if (max_bfs_queue_depth < cq_depth)
+				max_bfs_queue_depth = cq_depth;
 		}
 	}
 exit:
--
2.14.1