[PATCH v2 07/23] locking/lockdep: Use lockdep_init_task for task initialization consistently

From: Yuyang Du
Date: Mon May 06 2019 - 04:20:52 EST


Although there is a lockdep_init_task() helper, it does nothing; lockdep
instead initializes a task's lockdep fields by assigning them directly,
and does so inconsistently across call sites. Fix this by moving the
initialization into lockdep_init_task() and using it consistently.

Signed-off-by: Yuyang Du <duyuyang@xxxxxxxxx>
---
 include/linux/lockdep.h  |  7 ++++++-
 init/init_task.c         |  2 ++
 kernel/fork.c            |  3 ---
 kernel/locking/lockdep.c | 11 ++++++++---
 4 files changed, 16 insertions(+), 7 deletions(-)
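
To summarize the end state (all names are taken from the hunks below;
this is a sketch, not a literal excerpt, CONFIG_LOCKDEP=y side shown):
a single helper owns the per-task lockdep state, and both task creation
and lockdep_reset() call it instead of open-coding the three assignments.

	/* kernel/locking/lockdep.c: one place owns the per-task lockdep state */
	void lockdep_init_task(struct task_struct *task)
	{
		task->lockdep_depth = 0;	/* no locks held yet */
		task->curr_chain_key = 0;
		task->lockdep_recursion = 0;
	}

	/* kernel/fork.c, copy_process(): call the helper instead */
	#ifdef CONFIG_LOCKDEP
		lockdep_init_task(p);
	#endif

	/* kernel/locking/lockdep.c, lockdep_reset(): reuse the same helper */
	lockdep_init_task(current);

On the !CONFIG_LOCKDEP side the stub moves earlier in lockdep.h but
remains an empty static inline.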

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 851d44f..751a2cc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -287,6 +287,8 @@ struct held_lock {
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

+extern void lockdep_init_task(struct task_struct *task);
+
extern void lockdep_off(void);
extern void lockdep_on(void);

@@ -411,6 +413,10 @@ static inline void lock_set_subclass(struct lockdep_map *lock,

#else /* !CONFIG_LOCKDEP */

+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -503,7 +509,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index c70ef65..1b15cb9 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -166,6 +166,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
+ .lockdep_depth = 0, /* no locks held yet */
+ .curr_chain_key = 0,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 9dcd18a..8ec9e78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1868,9 +1868,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
lockdep_init_task(p);
#endif

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 222ee91..f02f7dc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -358,6 +358,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
}

+void lockdep_init_task(struct task_struct *task)
+{
+ task->lockdep_depth = 0; /* no locks held yet */
+ task->curr_chain_key = 0;
+ task->lockdep_recursion = 0;
+}
+
void lockdep_off(void)
{
current->lockdep_recursion++;
@@ -4588,9 +4595,7 @@ void lockdep_reset(void)
int i;

raw_local_irq_save(flags);
- current->curr_chain_key = 0;
- current->lockdep_depth = 0;
- current->lockdep_recursion = 0;
+ lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
--
1.8.3.1