[PATCH] locking/spinlock/debug: snapshot lock fields
From: Mark Rutland
Date: Mon Oct 23 2017 - 08:29:25 EST
Currently, the lock debug code doesn't use {READ,WRITE}_ONCE() to access
lock fields, and thus may observe torn values given a suitably unhelpful
compiler. Such torn values could result in false positives and/or false
negatives for some sanity checks.
Further, as we don't snapshot the values of the various fields, those
values might change between the time they are sanity checked and the time
they are logged, making debugging difficult.
This patch ensures that lock fields are accessed with
{READ,WRITE}_ONCE(), and uses a snapshot of the lock to ensure that
logged values are the same as those used for the sanity checks.
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
---
kernel/locking/spinlock_debug.c | 73 ++++++++++++++++++++++++++++-------------
1 file changed, 50 insertions(+), 23 deletions(-)
Hi Peter,
As mentioned at ELCE, I put this together while trying to track down an (as
yet) inexplicable spinlock recursion bug. Luckily, it seems that my compiler
wasn't actively out to get me, and this didn't help, but it might save someone
a few hours of painful debugging in future.
Mark.
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 9aa0fccd5d43..be0f0e5a7097 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -49,58 +49,85 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_dump(raw_spinlock_t *lock, const char *msg)
+static void spin_dump(raw_spinlock_t *lockp, raw_spinlock_t lock,
+ const char *msg)
{
struct task_struct *owner = NULL;
- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
- owner = lock->owner;
+ if (lock.owner && lock.owner != SPINLOCK_OWNER_INIT)
+ owner = lock.owner;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
- lock, lock->magic,
+ lockp, lock.magic,
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
- lock->owner_cpu);
+ lock.owner_cpu);
dump_stack();
}
-static void spin_bug(raw_spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lockp, raw_spinlock_t lock,
+ const char *msg)
{
if (!debug_locks_off())
return;
- spin_dump(lock, msg);
+ spin_dump(lockp, lock, msg);
}
-#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+#define SPIN_BUG_ON(cond, lockp, lock, msg) \
+ if (unlikely(cond)) spin_bug(lockp, lock, msg)
+
+/*
+ * Copy fields from a lock, ensuring that each field is read without tearing.
+ * We cannot read the whole lock atomically, so this isn't a snapshot of the
+ * whole lock at an instant in time. However, it is a stable snapshot of each
+ * field that won't change under our feet.
+ */
+static inline raw_spinlock_t
+debug_spin_lock_snapshot(raw_spinlock_t *lockp)
+{
+ raw_spinlock_t lock;
+
+ lock.raw_lock = READ_ONCE(lockp->raw_lock);
+ lock.magic = READ_ONCE(lockp->magic);
+ lock.owner_cpu = READ_ONCE(lockp->owner_cpu);
+ lock.owner = READ_ONCE(lockp->owner);
+
+ return lock;
+}
static inline void
-debug_spin_lock_before(raw_spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lockp)
{
- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(lock->owner == current, lock, "recursion");
- SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
- lock, "cpu recursion");
+ raw_spinlock_t lock = debug_spin_lock_snapshot(lockp);
+
+ SPIN_BUG_ON(lock.magic != SPINLOCK_MAGIC, lockp, lock, "bad magic");
+ SPIN_BUG_ON(lock.owner == current, lockp, lock, "recursion");
+ SPIN_BUG_ON(lock.owner_cpu == raw_smp_processor_id(), lockp, lock,
+ "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
- lock->owner_cpu = raw_smp_processor_id();
- lock->owner = current;
+ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+ WRITE_ONCE(lock->owner, current);
}
-static inline void debug_spin_unlock(raw_spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lockp)
{
- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
- SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
- SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
- lock, "wrong CPU");
- lock->owner = SPINLOCK_OWNER_INIT;
- lock->owner_cpu = -1;
+ raw_spinlock_t lock = debug_spin_lock_snapshot(lockp);
+
+ SPIN_BUG_ON(lock.magic != SPINLOCK_MAGIC, lockp, lock, "bad magic");
+ SPIN_BUG_ON(arch_spin_value_unlocked(lock.raw_lock), lockp, lock,
+ "already unlocked");
+ SPIN_BUG_ON(lock.owner != current, lockp, lock, "wrong owner");
+ SPIN_BUG_ON(lock.owner_cpu != raw_smp_processor_id(), lockp, lock,
+ "wrong CPU");
+ WRITE_ONCE(lockp->owner, SPINLOCK_OWNER_INIT);
+ WRITE_ONCE(lockp->owner_cpu, -1);
}
/*
--
2.11.0