Hi Kevin,
I may have found the bad thing that could have happened to ksoftirqd.
If you feel like testing, try the patch below. We were altering the
task struct without holding the runqueue lock, which is not safe
against interrupts or concurrent access. It cures a problem I ran
into, and will hopefully cure yours as well.
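To make the race concrete, here's a rough userspace analogue of the
broken and fixed patterns. This is a sketch only: pthread spinlocks
stand in for rq->lock, and struct rq / struct task / the move_task_*
names here are hypothetical stand-ins, not the real scheduler types.

/* Userspace analogue only -- not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_spinlock_t lock;
	unsigned long long clock;	/* analogue of rq->clock */
};

struct task {
	int cpu;			/* which runqueue the task is on */
};

static struct rq runqueues[2];

/* Broken pattern: the task is moved with no lock held, so a
 * concurrent waker or interrupt can observe the move half-done. */
static void move_task_unlocked(struct task *p, int cpu)
{
	p->cpu = cpu;
}

/* Fixed pattern: take the destination runqueue's lock, refresh its
 * (possibly stale) clock, then move the task. */
static void move_task_locked(struct task *p, int cpu)
{
	struct rq *rq = &runqueues[cpu];

	pthread_spin_lock(&rq->lock);
	rq->clock++;			/* update_rq_clock() analogue */
	p->cpu = cpu;
	pthread_spin_unlock(&rq->lock);
}

int main(void)
{
	struct task t = { .cpu = 0 };
	int i;

	for (i = 0; i < 2; i++)
		pthread_spin_init(&runqueues[i].lock, PTHREAD_PROCESS_PRIVATE);

	move_task_unlocked(&t, 1);	/* don't do this under contention */
	move_task_locked(&t, 0);
	printf("task on cpu %d, cpu0 clock %llu\n", t.cpu, runqueues[0].clock);
	return 0;
}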
sched: fix runqueue locking buglet.
Calling set_task_cpu() with the runqueue unlocked is unsafe. Add a
cpu_rq_lock() locking primitive, and use it to take the target
runqueue's lock around the call. Also, update rq->clock before
calling set_task_cpu(), as it may otherwise be stale.
Running netperf UDP_STREAM with two pinned tasks on a kernel with tip
commit 1b9508f applied produced the thoroughly unbelievable result
that ratelimiting newidle balancing could double the throughput of
the virgin kernel. Reverting to locking the runqueue prior to
runqueue selection restored benchmarking sanity, as did this
patchlet.
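For anyone reading along without a tree handy, here's a minimal
userspace sketch of how the new lock/unlock pair is meant to be used.
Assumptions: a pthread mutex and a dummy flags word stand in for
spin_lock_irqsave()/spin_unlock_irqrestore(), and every name below is
illustrative, not the kernel's.

/* Userspace sketch of the cpu_rq_lock()/cpu_rq_unlock() pairing. */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 2

struct rq {
	pthread_mutex_t lock;
	unsigned long long clock;
};

static struct rq runqueues[NR_CPUS] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* Lock the runqueue of a given cpu; *flags models the saved IRQ state. */
static struct rq *cpu_rq_lock(int cpu, unsigned long *flags)
{
	struct rq *rq = &runqueues[cpu];

	*flags = 0;			/* stand-in for saved IRQ state */
	pthread_mutex_lock(&rq->lock);
	return rq;
}

static void cpu_rq_unlock(struct rq *rq, unsigned long *flags)
{
	(void)flags;			/* IRQ state would be restored here */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	unsigned long flags;
	struct rq *rq = cpu_rq_lock(1, &flags);

	rq->clock++;			/* update_rq_clock() before the move */
	/* ... set_task_cpu(p, 1) would run here, under the lock ... */
	cpu_rq_unlock(rq, &flags);
	printf("cpu1 clock now %llu\n", runqueues[1].clock);
	return 0;
}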
Signed-off-by: Mike Galbraith <efault@xxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
LKML-Reference: <new-submission>
---
kernel/sched.c | 32 +++++++++++++++++++++++++-------
1 file changed, 25 insertions(+), 7 deletions(-)
Index: linux-2.6.32.git/kernel/sched.c
===================================================================
--- linux-2.6.32.git.orig/kernel/sched.c
+++ linux-2.6.32.git/kernel/sched.c
@@ -1011,6 +1011,24 @@ static struct rq *this_rq_lock(void)
 	return rq;
 }
 
+/*
+ * cpu_rq_lock - lock the runqueue of a given cpu and disable interrupts.
+ */
+static struct rq *cpu_rq_lock(int cpu, unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	spin_lock_irqsave(&rq->lock, *flags);
+	return rq;
+}
+
+static inline void cpu_rq_unlock(struct rq *rq, unsigned long *flags)
+	__releases(rq->lock)
+{
+	spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
@@ -2342,16 +2360,17 @@ static int try_to_wake_up(struct task_st
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
+	preempt_disable();
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
-		set_task_cpu(p, cpu);
-
-	rq = task_rq_lock(p, &flags);
-
-	if (rq != orig_rq)
+	if (cpu != orig_cpu) {
+		rq = cpu_rq_lock(cpu, &flags);
 		update_rq_clock(rq);
+		set_task_cpu(p, cpu);
+	} else
+		rq = task_rq_lock(p, &flags);
+	preempt_enable_no_resched();
 
 	if (rq->idle_stamp) {
 		u64 delta = rq->clock - rq->idle_stamp;
@@ -2365,7 +2384,6 @@ static int try_to_wake_up(struct task_st
 	}
 
 	WARN_ON(p->state != TASK_WAKING);
-	cpu = task_cpu(p);
 
 #ifdef CONFIG_SCHEDSTATS
 	schedstat_inc(rq, ttwu_count);