Re: [PATCH] AB-BA deadlock between uidhash_lock and tasklist_lock.

From: Andrew Morton
Date: Thu Dec 23 2004 - 18:03:37 EST


Robin Holt <holt@xxxxxxx> wrote:
>
> We have uncovered a very difficult to trip AB-BA deadlock between the
> uidhash_lock and tasklist_lock.

yup. I made some changes to your patch - please review.

- s/spin_lock_irqsave/spin_lock_irq/ in those places where local
interrupts are obviously always enabled (spin_unlock_irq() re-enables
interrupts unconditionally, so it is only correct there - see the
sketch after this list).

- your second patch still missed find_user().

- add comment.
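
For reference, the difference between the two flavours, as a userspace
analogy (my analogy - POSIX signal blocking standing in for local
interrupt disabling; the function names are made up):

#include <signal.h>
#include <stdio.h>

/* spin_lock_irq()-style exit: re-enable unconditionally. */
static void critical_irq(void)
{
	sigset_t all, none;

	sigfillset(&all);
	sigemptyset(&none);
	sigprocmask(SIG_SETMASK, &all, NULL);	/* "interrupts off" */
	/* ... critical section ... */
	sigprocmask(SIG_SETMASK, &none, NULL);	/* "on", no matter what */
}

/* spin_lock_irqsave()-style exit: restore whatever the caller had. */
static void critical_irqsave(void)
{
	sigset_t all, saved;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, &saved);	/* save, then "off" */
	/* ... critical section ... */
	sigprocmask(SIG_SETMASK, &saved, NULL);	/* put it back exactly */
}

int main(void)
{
	sigset_t blocked, check;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGUSR1);
	sigprocmask(SIG_SETMASK, &blocked, NULL); /* caller: SIGUSR1 off */

	critical_irqsave();
	sigprocmask(SIG_SETMASK, NULL, &check);
	printf("after irqsave-style: SIGUSR1 blocked=%d\n",
	       sigismember(&check, SIGUSR1));	/* 1 - state preserved */

	critical_irq();
	sigprocmask(SIG_SETMASK, NULL, &check);
	printf("after irq-style:     SIGUSR1 blocked=%d\n",
	       sigismember(&check, SIGUSR1));	/* 0 - state clobbered */
	return 0;
}

The irq flavour is fine in alloc_uid() and uid_cache_init() because
those run in plain process context with interrupts on; find_user()
can't make that promise about its callers, so it keeps the
save/restore pair.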

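To make the failure mode concrete: a minimal userspace sketch of the
interleaving Robin describes, with pthread mutexes standing in for the
two kernel locks and a second thread standing in for the timer
interrupt (the analogy and all names are mine; the program is expected
to hang - that is the bug):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tasklist = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t uidhash = PTHREAD_MUTEX_INITIALIZER;

/* CPU 0, exit path: write_lock_irq(&tasklist_lock), then uidhash_lock. */
static void *exit_path(void *arg)
{
	pthread_mutex_lock(&tasklist);
	usleep(1000);			/* widen the race window */
	pthread_mutex_lock(&uidhash);	/* spins: the other CPU holds it */
	pthread_mutex_unlock(&uidhash);
	pthread_mutex_unlock(&tasklist);
	return arg;
}

/* CPU 1: free_uid() holds uidhash_lock when the timer interrupt arrives
 * and tries read_lock(&tasklist_lock), queueing behind the writer. */
static void *free_uid_then_irq(void *arg)
{
	pthread_mutex_lock(&uidhash);
	usleep(1000);
	pthread_mutex_lock(&tasklist);	/* the "interrupt": never succeeds */
	pthread_mutex_unlock(&tasklist);
	pthread_mutex_unlock(&uidhash);
	return arg;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, exit_path, NULL);
	pthread_create(&t2, NULL, free_uid_then_irq, NULL);
	pthread_join(t1, NULL);		/* never returns once both block */
	pthread_join(t2, NULL);
	puts("raced past it this time - run it again");
	return 0;
}

With interrupts held off whenever uidhash_lock is held, the interrupt
leg can't run at that point, the cycle can't close, and the patch below
enforces exactly that: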

--- 25/kernel/user.c~ab-ba-deadlock-between-uidhash_lock-and-tasklist_lock Thu Dec 23 14:45:02 2004
+++ 25-akpm/kernel/user.c Thu Dec 23 14:52:07 2004
@@ -26,6 +26,14 @@
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * uidhash_lock is taken inside write_lock_irq(&tasklist_lock). If a timer
+ * interrupt were to occur while we hold uidhash_lock, and that interrupt takes
+ * read_lock(&tasklist_lock), then we have an AB-BA deadlock scenario. Hence
+ * uidhash_lock must always be taken in an irq-safe manner to hold off the
+ * timer interrupt.
+ */
 static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
 
 struct user_struct root_user = {
@@ -81,15 +89,19 @@ static inline struct user_struct *uid_ha
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -97,6 +109,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_irq_restore(flags);
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -104,9 +117,9 @@ struct user_struct * alloc_uid(uid_t uid
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -132,7 +145,7 @@ struct user_struct * alloc_uid(uid_t uid
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -142,7 +155,7 @@ struct user_struct * alloc_uid(uid_t uid
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 
 	}
 	return up;
@@ -178,9 +191,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
_
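
A note on the free_uid() hunks, since they are shaped differently from
the rest: the lock acquisition there is buried inside
atomic_dec_and_lock(), which only takes uidhash_lock when the refcount
actually drops to zero, and there is no irqsave flavour of that helper
to reach for. Hence the bare local_irq_save()/local_irq_restore()
bracket around the whole operation, which makes the conditional
acquisition irq-safe. For reference, a userspace sketch of the
dec-and-lock pattern itself (my illustration, in C11 atomics - not the
kernel's implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Decrement *count; if it hits zero, return true with hash_lock held so
 * the caller can unhash and free.  The lock is taken before the final
 * decrement so that a concurrent lookup can't re-take a reference on an
 * object that is already committed to being freed.
 */
static bool dec_and_lock(atomic_int *count)
{
	int old = atomic_load(count);

	/* Fast path: dropping a reference that is not the last one. */
	while (old > 1)
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;

	/* Slow path: this may be the last reference. */
	pthread_mutex_lock(&hash_lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* count is now zero, lock held */
	pthread_mutex_unlock(&hash_lock);
	return false;
}

int main(void)
{
	atomic_int refs = 2;

	dec_and_lock(&refs);		/* 2 -> 1: lock never touched */
	if (dec_and_lock(&refs)) {	/* 1 -> 0: returns with lock held */
		puts("last reference dropped; free under the lock");
		pthread_mutex_unlock(&hash_lock);
	}
	return 0;
}

Because the caller can't know in advance whether the lock will be
taken, the only way to make that acquisition irq-safe from the outside
is to disable interrupts around the whole call, which is what the
patch does.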
