Re: 4.2-rc5 rcu stalls.

From: Sasha Levin
Date: Tue Aug 04 2015 - 00:55:49 EST


On 08/03/2015 06:03 PM, Paul E. McKenney wrote:
>> > Ugh, that doesn't revert cleanly. Got something handy ?
> I do not, but perhaps either Sasha or Frederic do.

I've attached a revert courtesy of Peter.


Thanks,
Sasha

include/linux/preempt.h | 12 ------------
kernel/sched/core.c     | 34 +++++++++++++++++++---------------
2 files changed, 19 insertions(+), 27 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185173..3a93d4cdcce9 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -137,18 +137,6 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

-#define preempt_active_enter() \
-do { \
- preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
- barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
- barrier(); \
- preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10081..bd378bd21a0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2983,7 +2983,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
* - return from syscall or exception to user-space
* - return from interrupt-handler to user-space
*
- * WARNING: must be called with preemption disabled!
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
*/
static void __sched __schedule(void)
{
@@ -2992,6 +2994,7 @@ static void __sched __schedule(void)
struct rq *rq;
int cpu;

+ preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch();
@@ -3058,6 +3061,8 @@ static void __sched __schedule(void)
}

balance_callback(rq);
+
+ sched_preempt_enable_no_resched();
}

static inline void sched_submit_work(struct task_struct *tsk)
@@ -3078,9 +3083,7 @@ asmlinkage __visible void __sched schedule(void)

sched_submit_work(tsk);
do {
- preempt_disable();
__schedule();
- sched_preempt_enable_no_resched();
} while (need_resched());
}
EXPORT_SYMBOL(schedule);
@@ -3119,14 +3122,15 @@ void __sched schedule_preempt_disabled(void)
static void __sched notrace preempt_schedule_common(void)
{
do {
- preempt_active_enter();
+ __preempt_count_add(PREEMPT_ACTIVE);
__schedule();
- preempt_active_exit();
+ __preempt_count_sub(PREEMPT_ACTIVE);

/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
+ barrier();
} while (need_resched());
}

@@ -3172,13 +3176,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
return;

do {
- /*
- * Use raw __prempt_count() ops that don't call function.
- * We can't call functions before disabling preemption which
- * disarm preemption tracing recursions.
- */
- __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
- barrier();
+ __preempt_count_add(PREEMPT_ACTIVE);
/*
* Needs preempt disabled in case user_exit() is traced
* and the tracer calls preempt_enable_notrace() causing
@@ -3188,8 +3186,8 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
__schedule();
exception_exit(prev_ctx);

+ __preempt_count_sub(PREEMPT_ACTIVE);
barrier();
- __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3212,11 +3210,17 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
prev_state = exception_enter();

do {
- preempt_active_enter();
+ __preempt_count_add(PREEMPT_ACTIVE);
local_irq_enable();
__schedule();
local_irq_disable();
- preempt_active_exit();
+ __preempt_count_sub(PREEMPT_ACTIVE);
+
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ barrier();
} while (need_resched());

exception_exit(prev_state);
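
For anyone skimming the patch: below is a minimal sketch (not part of the revert, with a hypothetical function name) of the calling convention it restores. With the revert applied, __schedule() disables and re-enables preemption itself, so a preemption point only marks PREEMPT_ACTIVE around the call and loops until need_resched() is clear, as the restored WARNING comment describes:

    /* Sketch only -- roughly mirrors preempt_schedule_common() after the revert. */
    static void example_preemption_point(void)    /* hypothetical name */
    {
            do {
                    /* Tell __schedule() this is a preemption, not a voluntary sleep. */
                    __preempt_count_add(PREEMPT_ACTIVE);
                    __schedule();           /* handles preempt_disable() internally */
                    __preempt_count_sub(PREEMPT_ACTIVE);

                    /*
                     * Check again in case we missed a preemption
                     * opportunity between schedule and now.
                     */
                    barrier();
            } while (need_resched());
    }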