[PATCH v3 07/30] locking/lockdep: Remove indirect dependency redundancy check
From: Yuyang Du
Date: Fri Jun 28 2019 - 05:16:17 EST
The indirect dependency redundancy check was added for the cross-release
feature, which has since been reverted. As later suggested by Peter, the
check was then made effective only when CONFIG_LOCKDEP_SMALL is set.
With (recursive) read-write lock types considered in the dependency
graph, the indirect dependency redundancy check would be very
complicated, if not impossible, to do correctly. Let's remove it for
good.
Signed-off-by: Yuyang Du <duyuyang@xxxxxxxxx>
---
kernel/locking/lockdep.c | 41 --------------------------------------
kernel/locking/lockdep_internals.h | 1 -
kernel/locking/lockdep_proc.c | 2 --
3 files changed, 44 deletions(-)
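
Note (not part of the patch): below is a minimal standalone sketch of
why plain reachability, which is what the removed check_redundant()
tested, is no longer enough to decide redundancy once dependencies
carry read/write types. The "strength" model and all names here are
invented purely for illustration; the actual type rules used by lockdep
in this series are more involved.

/*
 * Toy user-space example, not kernel code. Edge "strength":
 * 0 = taken as recursive read, 1 = taken as write.
 * The toy graph is acyclic, so naive recursion is fine.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 3

/* strength of the edge i -> j, or -1 if there is no edge */
static int edge[NR_NODES][NR_NODES] = {
	{ -1,  0, -1 },	/* A -> B, taken as recursive read */
	{ -1, -1,  0 },	/* B -> C, taken as recursive read */
	{ -1, -1, -1 },
};

/* Old-style check: is <dst> reachable from <src> at all? */
static bool reachable(int src, int dst)
{
	if (src == dst)
		return true;
	for (int i = 0; i < NR_NODES; i++)
		if (edge[src][i] >= 0 && reachable(i, dst))
			return true;
	return false;
}

/*
 * Type-aware check: is there a path whose every edge is at least as
 * strong as the candidate edge? Only then could the new edge arguably
 * add nothing to the graph.
 */
static bool covered(int src, int dst, int strength)
{
	if (src == dst)
		return true;
	for (int i = 0; i < NR_NODES; i++)
		if (edge[src][i] >= strength && covered(i, dst, strength))
			return true;
	return false;
}

int main(void)
{
	/* Candidate new dependency: A -> C, taken as a write (strength 1). */
	printf("reachability says redundant: %s\n",
	       reachable(0, 2) ? "yes" : "no");	/* yes: A -> B -> C exists */
	printf("type-aware check says redundant: %s\n",
	       covered(0, 2, 1) ? "yes" : "no");	/* no: the path is read-only */
	return 0;
}

A bare reachability test would discard the stronger A -> C write
dependency just because a read-only path already exists, which is why
keeping check_redundant() correct alongside read/write types would be
hard.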
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f171c6e..c61fdef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1744,38 +1744,6 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return ret;
}
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
- int ret;
- struct lock_list *uninitialized_var(target_entry);
- struct lock_list src_entry = {
- .class = hlock_class(src),
- .parent = NULL,
- };
-
- debug_atomic_inc(nr_redundant_checks);
-
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
- if (!ret) {
- debug_atomic_inc(nr_redundant);
- ret = 2;
- } else if (ret < 0)
- ret = 0;
-
- return ret;
-}
-#endif
-
#ifdef CONFIG_TRACE_IRQFLAGS
static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2437,15 +2405,6 @@ static inline void inc_chains(void)
}
}
-#ifdef CONFIG_LOCKDEP_SMALL
- /*
- * Is the <prev> -> <next> link redundant?
- */
- ret = check_redundant(prev, next);
- if (ret != 1)
- return ret;
-#endif
-
if (!trace->nr_entries && !save_trace(trace))
return 0;
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index cc83568..e9a8ed6 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -170,7 +170,6 @@ struct lockdep_stats {
unsigned long redundant_softirqs_on;
unsigned long redundant_softirqs_off;
int nr_unused_locks;
- unsigned int nr_redundant_checks;
unsigned int nr_redundant;
unsigned int nr_cyclic_checks;
unsigned int nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 9c49ec6..58ed889 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
- seq_printf(m, " redundant checks: %11llu\n",
- debug_atomic_read(nr_redundant_checks));
seq_printf(m, " redundant links: %11llu\n",
debug_atomic_read(nr_redundant));
seq_printf(m, " find-mask forwards checks: %11llu\n",
--
1.8.3.1