Re: [PATCH v3 6/6] pid: drop irq disablement around pidmap_lock

From: Oleg Nesterov
Date: Sat Feb 01 2025 - 12:42:47 EST


I'll try to review on Monday, but after a very quick glance I see
nothing obviously wrong. I am not sure 4/6 is really useful, but I
won't argue.

However, shouldn't this patch also remove the comment that explains
the possible lock inversion? Above put_pid():

/*
* Note: disable interrupts while the pidmap_lock is held as an
* interrupt might come in and do read_lock(&tasklist_lock).
*
* If we don't disable interrupts there is a nasty deadlock between
* detach_pid()->free_pid() and another cpu that does
* spin_lock(&pidmap_lock) followed by an interrupt routine that does
* read_lock(&tasklist_lock);
*
* After we clean up the tasklist_lock and know there are no
* irq handlers that take it we can leave the interrupts enabled.
* For now it is easier to be safe than to prove it can't happen.
*/
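
FWIW, the inversion it describes is roughly this interleaving (the
release_task() -> detach_pid() chain is just one illustration of the
old tasklist_lock -> pidmap_lock ordering; details hand-waved):

    CPU 0 (irqs on)                  CPU 1
    ---------------                  -----
    spin_lock(&pidmap_lock)
                                     write_lock_irq(&tasklist_lock)
                                       ...
                                       detach_pid()
                                         free_pid()
                                           spin_lock(&pidmap_lock)
                                           /* spins: CPU 0 holds it */
    <interrupt>
      read_lock(&tasklist_lock)
      /* spins: writer on CPU 1 */

Neither CPU can make progress. Now that free_pid() asserts that
tasklist_lock is not held, CPU 1's leg of the cycle is gone, so
holding pidmap_lock with irqs enabled is safe.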

Oleg.

On 02/01, Mateusz Guzik wrote:
>
> It no longer serves any purpose now that the tasklist_lock ->
> pidmap_lock ordering got eliminated.
>
> Signed-off-by: Mateusz Guzik <mjguzik@xxxxxxxxx>
> ---
> kernel/pid.c | 23 +++++++++++------------
> 1 file changed, 11 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/pid.c b/kernel/pid.c
> index 73625f28c166..900193de4232 100644
> --- a/kernel/pid.c
> +++ b/kernel/pid.c
> @@ -115,11 +115,10 @@ static void delayed_put_pid(struct rcu_head *rhp)
> void free_pid(struct pid *pid)
> {
> int i;
> - unsigned long flags;
>
> lockdep_assert_not_held(&tasklist_lock);
>
> - spin_lock_irqsave(&pidmap_lock, flags);
> + spin_lock(&pidmap_lock);
> for (i = 0; i <= pid->level; i++) {
> struct upid *upid = pid->numbers + i;
> struct pid_namespace *ns = upid->ns;
> @@ -142,7 +141,7 @@ void free_pid(struct pid *pid)
> idr_remove(&ns->idr, upid->nr);
> }
> pidfs_remove_pid(pid);
> - spin_unlock_irqrestore(&pidmap_lock, flags);
> + spin_unlock(&pidmap_lock);
>
> call_rcu(&pid->rcu, delayed_put_pid);
> }
> @@ -210,7 +209,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
> }
>
> idr_preload(GFP_KERNEL);
> - spin_lock_irq(&pidmap_lock);
> + spin_lock(&pidmap_lock);
>
> if (tid) {
> nr = idr_alloc(&tmp->idr, NULL, tid,
> @@ -237,7 +236,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
> nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
> pid_max, GFP_ATOMIC);
> }
> - spin_unlock_irq(&pidmap_lock);
> + spin_unlock(&pidmap_lock);
> idr_preload_end();
>
> if (nr < 0) {
> @@ -271,7 +270,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>
> upid = pid->numbers + ns->level;
> idr_preload(GFP_KERNEL);
> - spin_lock_irq(&pidmap_lock);
> + spin_lock(&pidmap_lock);
> if (!(ns->pid_allocated & PIDNS_ADDING))
> goto out_unlock;
> pidfs_add_pid(pid);
> @@ -280,18 +279,18 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
> idr_replace(&upid->ns->idr, pid, upid->nr);
> upid->ns->pid_allocated++;
> }
> - spin_unlock_irq(&pidmap_lock);
> + spin_unlock(&pidmap_lock);
> idr_preload_end();
>
> return pid;
>
> out_unlock:
> - spin_unlock_irq(&pidmap_lock);
> + spin_unlock(&pidmap_lock);
> idr_preload_end();
> put_pid_ns(ns);
>
> out_free:
> - spin_lock_irq(&pidmap_lock);
> + spin_lock(&pidmap_lock);
> while (++i <= ns->level) {
> upid = pid->numbers + i;
> idr_remove(&upid->ns->idr, upid->nr);
> @@ -301,7 +300,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
> if (ns->pid_allocated == PIDNS_ADDING)
> idr_set_cursor(&ns->idr, 0);
>
> - spin_unlock_irq(&pidmap_lock);
> + spin_unlock(&pidmap_lock);
>
> kmem_cache_free(ns->pid_cachep, pid);
> return ERR_PTR(retval);
> @@ -309,9 +308,9 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>
> void disable_pid_allocation(struct pid_namespace *ns)
> {
> - spin_lock_irq(&pidmap_lock);
> + spin_lock(&pidmap_lock);
> ns->pid_allocated &= ~PIDNS_ADDING;
> - spin_unlock_irq(&pidmap_lock);
> + spin_unlock(&pidmap_lock);
> }
>
> struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
> --
> 2.43.0
>