Re: [RFC][PATCH 5/5] sched: Reduce ttwu rq->lock contention

From: Peter Zijlstra
Date: Fri Dec 17 2010 - 08:23:52 EST


On Fri, 2010-12-17 at 11:06 +0800, Yan, Zheng wrote:
> On Fri, Dec 17, 2010 at 4:32 AM, Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:
> > @@ -953,7 +955,7 @@ static inline struct rq *__task_rq_lock(
> >        for (;;) {
> >                rq = task_rq(p);
> >                raw_spin_lock(&rq->lock);
> > -              if (likely(rq == task_rq(p)))
> > +              if (likely(rq == task_rq(p)) && !task_is_waking(p))
> >                        return rq;
> >                raw_spin_unlock(&rq->lock);
> >        }
> > @@ -973,7 +975,7 @@ static struct rq *task_rq_lock(struct ta
> >                local_irq_save(*flags);
> >                rq = task_rq(p);
> >                raw_spin_lock(&rq->lock);
> > -              if (likely(rq == task_rq(p)))
> > +              if (likely(rq == task_rq(p)) && !task_is_waking(p))
> >                        return rq;
> >                raw_spin_unlock_irqrestore(&rq->lock, *flags);
> >        }
>
> Looks like nothing prevents ttwu() from changing the task's CPU while
> someone else is holding task_rq_lock(). Is this OK?

Ah, crud, good catch. No, that is not quite OK ;-)

I'm starting to think adding a per-task scheduler lock isn't such a bad
idea after all :-)

How does something like the below look? It waits for the current
task_rq(p)->lock owner to go away after we flip p->state to TASK_WAKING.
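
Roughly, the window this closes (my sketch of the interleaving, not
taken verbatim from any of the patches):

	CPU0 (ttwu)				CPU1 (e.g. set_cpus_allowed_ptr())

						rq = task_rq_lock(p);
	p->state = TASK_WAKING;
	raw_spin_unlock_wait(&task_rq(p)->lock);
	   ... spins ...			/* still fiddling with p */
						task_rq_unlock(rq, &flags);
	cpu = select_task_rq(p, ...);

Whoever took the lock before we set TASK_WAKING is forced out before we
go pick a CPU, and with the task_is_waking() test above, anybody
arriving later keeps spinning in task_rq_lock() until the wakeup is
done.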

It also optimizes the x86 spinlock code a bit: there is no need to wait
for every pending ticket holder to drain, only for the current owner to
drop the lock (a small worked example of the ticket encoding follows
the patch).

This also solves the p->cpus_allowed race: set_cpus_allowed_ptr() runs
under task_rq_lock(), so once the unlock_wait completes any in-flight
mask change has finished, and later changers spin on the
task_is_waking() test until the wakeup is done.

---
Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -2518,6 +2518,8 @@ try_to_wake_up(struct task_struct *p, un
                binary;
        }
 
+       raw_spin_unlock_wait(&task_rq(p)->lock);
+
        ret = 1; /* we qualify as a proper wakeup now */
 
        if (load) // XXX racy
@@ -2536,10 +2538,7 @@ try_to_wake_up(struct task_struct *p, un
 
        if (p->sched_class->task_waking)
                p->sched_class->task_waking(p);
-       /*
-        * XXX: by having set TASK_WAKING outside of rq->lock, there
-        * could be an in-flight change to p->cpus_allowed..
-        */
+
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 #endif
        ttwu_queue(p, cpu);
Index: linux-2.6/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6/arch/x86/include/asm/spinlock.h
@@ -158,18 +158,34 @@ static __always_inline void __ticket_spi
 }
 #endif
 
+#define TICKET_MASK ((1 << TICKET_SHIFT) - 1)
+
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
        int tmp = ACCESS_ONCE(lock->slock);
 
-       return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+       return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
        int tmp = ACCESS_ONCE(lock->slock);
 
-       return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+       return (((tmp >> TICKET_SHIFT) - tmp) & TICKET_MASK) > 1;
+}
+
+static inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       int tmp = ACCESS_ONCE(lock->slock);
+
+       if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+               return; /* not locked */
+
+       tmp &= TICKET_MASK;
+
+       /* wait until the current lock holder goes away */
+       while ((ACCESS_ONCE(lock->slock) & TICKET_MASK) == tmp)
+               cpu_relax();
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
@@ -206,7 +222,11 @@ static __always_inline void arch_spin_lo
        arch_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       __ticket_spin_unlock_wait(lock);
+}
+#else
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
@@ -214,6 +234,8 @@ static inline void arch_spin_unlock_wait
                cpu_relax();
 }
 
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
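
As an aside, here is a minimal user-space model of the ticket encoding
(my illustration, assuming the TICKET_SHIFT == 8, NR_CPUS <= 256
layout; is_locked() mirrors __ticket_spin_is_locked() above) showing
why waiting for the owner byte to move on is enough:

#include <stdio.h>

#define TICKET_SHIFT 8				/* NR_CPUS <= 256 layout */
#define TICKET_MASK ((1 << TICKET_SHIFT) - 1)

/* mirrors __ticket_spin_is_locked(): owner (low byte) != next (high byte) */
static int is_locked(int slock)
{
	return !!(((slock >> TICKET_SHIFT) ^ slock) & TICKET_MASK);
}

int main(void)
{
	/* owner holds ticket 3, next free ticket is 6: two queued waiters */
	int slock = (6 << TICKET_SHIFT) | 3;
	int owner = slock & TICKET_MASK;

	printf("locked: %d\n", is_locked(slock));		/* 1 */

	/*
	 * The generic arch_spin_unlock_wait() spins until is_locked()
	 * goes false, i.e. until tickets 3, 4 and 5 have all come and
	 * gone.  __ticket_spin_unlock_wait() only spins until the
	 * owner byte moves past the holder it sampled:
	 */
	slock = (6 << TICKET_SHIFT) | 4;	/* ticket 3 released, 4 owns it */

	printf("generic test would still spin: %d\n", is_locked(slock));	  /* 1 */
	printf("new test is done waiting: %d\n", (slock & TICKET_MASK) != owner); /* 1 */

	return 0;
}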
