Re: lockdep pinning

From: Liam R. Howlett

Date: Fri Feb 27 2026 - 11:36:40 EST


* Peter Zijlstra <peterz@xxxxxxxxxxxxx> [260227 08:40]:
> On Thu, Feb 26, 2026 at 05:01:08PM -0500, Liam R. Howlett wrote:
> > Hello,
> >
> > I'm trying to detect locking issues in the maple tree, which is severely
> > complicated by allowing external locks. I have an rcu lock validation
> > using the get_state_synchronize_rcu() and poll_state_synchronize_rcu()
> > interface working and would like something like this for write locks.
> >
> > The issue is that, with external locks, the code that does the unlocking
> > is outside my scope. Although I can detect a start of an operation and
> > pin a lock using the existing lockdep_pin_lock() function, I do not know
> > when the lock is 'done' as it can be used for multiple operations on the
> > tree.
> >
> > So I'd either be unpinning too early and be unable to tell if it was
> > unlocked/relocked, or I'd leave it pinned forever - which will trigger a
> > warning on release.
> >
> > What I'm looking for is a way to ensure my code is still in the same
> > lock context, but not unpin/pin. Maybe something like storing a lock
> > cookie id in the lock every time it's locked (just overwrite would be
> > fine), and a way to read that value. Then I can validate the value
> > against the one I've stored.
> >
> > AFAICT, I cannot accomplish what I want with the existing lockdep
> > implementation?
>
> Right, that seems to be the exact opposite of what the pin thing was
> built for. That was to ensure nobody else releases my lock, whereas you
> want to make sure you're in the same critical section of this other
> person's lock.
>
> So if you have LOCK_STAT=y, you could use hlock->holdtime_stamp for this
> I suppose, but that is a bit yuck.
>
> How's something like so? It isn't really nice, but should mostly work I
> reckon.
>
> When consecutive lockdep_sequence(your_lock) calls return a different
> value you can assume it ain't the same critical section no moar.

Yes, I think this would work. I can just stash the sequence count and
validate it is the same. This should allow me to detect cases where
people drop the lock but don't reset the tree walker.

>
> ---
>
> diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> index 621566345406..a6451ecbbe9a 100644
> --- a/include/linux/lockdep.h
> +++ b/include/linux/lockdep.h
> @@ -273,6 +273,9 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
> extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
> extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
>
> +extern u32 lock_sequence(struct lockdep_map *lock);
> +#define lockdep_sequence(lock) lock_sequence(&(lock)->dep_map)
> +
> #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
>
> #define lockdep_assert(cond) \
> diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
> index eae115a26488..55c4b152fedf 100644
> --- a/include/linux/lockdep_types.h
> +++ b/include/linux/lockdep_types.h
> @@ -253,7 +253,8 @@ struct held_lock {
> unsigned int hardirqs_off:1;
> unsigned int sync:1;
> unsigned int references:11; /* 32 bits */
> - unsigned int pin_count;
> + unsigned int pin_count:24;
> + unsigned int seq_count:8;
> };
>
> #else /* !CONFIG_LOCKDEP */
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 074ad4ef3d81..41a440685c32 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1267,6 +1267,7 @@ struct task_struct {
> u64 curr_chain_key;
> int lockdep_depth;
> unsigned int lockdep_recursion;
> + unsigned int lockdep_seq;
> struct held_lock held_locks[MAX_LOCK_DEPTH];
> #endif
>
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 2d4c5bab5af8..b2b0a506960b 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -5077,7 +5077,7 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
> static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
> int trylock, int read, int check, int hardirqs_off,
> struct lockdep_map *nest_lock, unsigned long ip,
> - int references, int pin_count, int sync)
> + int references, int pin_count, int sync, int seq)
> {
> struct task_struct *curr = current;
> struct lock_class *class = NULL;
> @@ -5183,6 +5183,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
> hlock->holdtime_stamp = lockstat_clock();
> #endif
> hlock->pin_count = pin_count;
> + hlock->seq_count = seq;
>
> if (check_wait_context(curr, hlock))
> return 0;
> @@ -5388,7 +5389,7 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
> hlock->read, hlock->check,
> hlock->hardirqs_off,
> hlock->nest_lock, hlock->acquire_ip,
> - hlock->references, hlock->pin_count, 0)) {
> + hlock->references, hlock->pin_count, 0, hlock->seq_count)) {
> case 0:
> return 1;
> case 1:
> @@ -5684,6 +5685,24 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
> WARN(1, "unpinning an unheld lock\n");
> }
>
> +static u32 __lock_sequence(struct lockdep_map *lock)
> +{
> + struct task_struct *curr = current;
> + int i;
> +
> + if (unlikely(!debug_locks))
> + return ~0;
> +
> + for (i = 0; i < curr->lockdep_depth; i++) {
> + struct held_lock *hlock = curr->held_locks + i;
> +
> + if (match_held_lock(hlock, lock))
> + return hlock->seq_count;
> + }
> +
> + return ~0;
> +}
> +
> /*
> * Check whether we follow the irq-flags state precisely:
> */
> @@ -5866,7 +5885,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
>
> lockdep_recursion_inc();
> __lock_acquire(lock, subclass, trylock, read, check,
> - irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0);
> + irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0,
> + ++current->lockdep_seq);
> lockdep_recursion_finish();
> raw_local_irq_restore(flags);
> }
> @@ -5914,7 +5934,8 @@ void lock_sync(struct lockdep_map *lock, unsigned subclass, int read,
>
> lockdep_recursion_inc();
> __lock_acquire(lock, subclass, 0, read, check,
> - irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 1);
> + irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 1,
> + ++current->lockdep_seq);
> check_chain_key(current);
> lockdep_recursion_finish();
> raw_local_irq_restore(flags);
> @@ -6000,6 +6021,26 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
> }
> EXPORT_SYMBOL_GPL(lock_unpin_lock);
>
> +u32 lock_sequence(struct lockdep_map *lock)
> +{
> + unsigned long flags;
> + u32 seq = ~0;
> +
> + if (unlikely(!lockdep_enabled()))
> + return seq;
> +
> + raw_local_irq_save(flags);
> + check_flags(flags);
> +
> + lockdep_recursion_inc();
> + seq = __lock_sequence(lock);
> + lockdep_recursion_finish();
> + raw_local_irq_restore(flags);
> +
> + return seq;
> +}
> +EXPORT_SYMBOL_GPL(lock_sequence);
> +
> #ifdef CONFIG_LOCK_STAT
> static void print_lock_contention_bug(struct task_struct *curr,
> struct lockdep_map *lock,