[ANNOUNCE] 3.2.77-rt111

From: Steven Rostedt
Date: Mon Feb 29 2016 - 12:07:44 EST



Dear RT Folks,

I'm pleased to announce the 3.2.77-rt111 stable release.


You can get this release via the git tree at:

git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

branch: v3.2-rt
Head SHA1: 3719e6c3f6c968343cf153a67429ba62fe2be2e2
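
For example, a minimal sketch of fetching and verifying the tree (the
local directory name is up to you):

  git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git
  cd linux-stable-rt
  git checkout v3.2-rt
  git log -1 --format=%H   # expect 3719e6c3f6c968343cf153a67429ba62fe2be2e2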


Or to build 3.2.77-rt111 directly, the following patches should be applied:

http://www.kernel.org/pub/linux/kernel/v3.x/linux-3.2.tar.xz

http://www.kernel.org/pub/linux/kernel/v3.x/patch-3.2.77.xz

http://www.kernel.org/pub/linux/kernel/projects/rt/3.2/patch-3.2.77-rt111.patch.xz
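
For example, a sketch of a from-scratch build tree, assuming the three
files above have already been downloaded into the current directory:

  tar xf linux-3.2.tar.xz
  cd linux-3.2
  xzcat ../patch-3.2.77.xz | patch -p1
  xzcat ../patch-3.2.77-rt111.patch.xz | patch -p1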



You can also build from 3.2.77-rt110 by applying the incremental patch:

http://www.kernel.org/pub/linux/kernel/projects/rt/3.2/incr/patch-3.2.77-rt110-rt111.patch.xz
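
For example (again just a sketch), with a tree already patched to
3.2.77-rt110 and the incremental patch downloaded alongside it:

  cd linux-3.2
  xzcat ../patch-3.2.77-rt110-rt111.patch.xz | patch -p1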



Enjoy,

-- Steve


Changes from v3.2.77-rt110:

---

Josh Cartwright (1):
net: Make synchronize_rcu_expedited() conditional on !RT_FULL

Peter Zijlstra (1):
sched: Introduce the trace_sched_waking tracepoint

Sebastian Andrzej Siewior (1):
dump stack: don't disable preemption during trace

Steven Rostedt (Red Hat) (2):
rtmutex: Have slowfn of rt_mutex_timed_fastlock() use enum
Linux 3.2.77-rt111

Thomas Gleixner (1):
rtmutex: Handle non enqueued waiters gracefully

bmouring@xxxxxx (1):
rtmutex: Use chainwalking control enum

----
arch/x86/kernel/dumpstack_64.c | 8 ++++----
include/trace/events/sched.h | 30 +++++++++++++++++++++---------
kernel/rtmutex.c | 6 +++---
kernel/sched.c | 7 +++++--
kernel/trace/trace_sched_switch.c | 2 +-
kernel/trace/trace_sched_wakeup.c | 2 +-
localversion-rt | 2 +-
net/core/dev.c | 2 +-
8 files changed, 37 insertions(+), 22 deletions(-)
---------------------------
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5e890ccd5429..530fdb80d250 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -114,7 +114,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = get_cpu();
+ const unsigned cpu = get_cpu_light();
unsigned long *irq_stack_end =
(unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned used = 0;
@@ -191,7 +191,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* This handles the process stack:
*/
bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
- put_cpu();
+ put_cpu_light();
}
EXPORT_SYMBOL(dump_trace);

@@ -205,7 +205,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
int cpu;
int i;

- preempt_disable();
+ migrate_disable();
cpu = smp_processor_id();

irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
@@ -238,7 +238,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk(KERN_CONT " %016lx", *stack++);
touch_nmi_watchdog();
}
- preempt_enable();
+ migrate_enable();

printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18b63b6..29cfc3fe68ad 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -54,9 +54,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,

- TP_PROTO(struct task_struct *p, int success),
+ TP_PROTO(struct task_struct *p),

- TP_ARGS(p, success),
+ TP_ARGS(p),

TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -70,25 +70,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
- __entry->success = success;
+ __entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),

- TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+ TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success, __entry->target_cpu)
+ __entry->target_cpu)
);

+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_waking,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success));
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));

/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success));
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 52cab2720a06..e836b05093a1 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1179,7 +1179,7 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
__set_current_state(TASK_UNINTERRUPTIBLE);
pi_unlock(&self->pi_lock);

- ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
BUG_ON(ret);

for (;;) {
@@ -1657,7 +1657,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
enum rtmutex_chainwalk chwalk,
int (*slowfn)(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock))
+ enum rtmutex_chainwalk chwalk))
{
if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
likely(rt_mutex_cmpxchg(lock, NULL, current))) {
@@ -1935,7 +1935,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}

- if (unlikely(ret))
+ if (ret && rt_mutex_has_waiters(lock))
remove_waiter(lock, waiter);

raw_spin_unlock(&lock->wait_lock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 1615ca209afd..a9f6d6c0ab93 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2661,10 +2661,11 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
- trace_sched_wakeup(p, true);
check_preempt_curr(rq, p, wake_flags);

p->state = TASK_RUNNING;
+ trace_sched_wakeup(p);
+
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
@@ -2852,6 +2853,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (!(wake_flags & WF_LOCK_SLEEPER))
p->saved_state = TASK_RUNNING;

+ trace_sched_waking(p);
+
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);

@@ -3070,7 +3073,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = 1;
- trace_sched_wakeup_new(p, true);
+ trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a18456..5982146ee863 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
}

static void
-probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
{
struct trace_array_cpu *data;
unsigned long flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 6857e0c99656..a3f6ef0642f1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -403,7 +403,7 @@ static void wakeup_reset(struct trace_array *tr)
}

static void
-probe_wakeup(void *ignore, struct task_struct *p, int success)
+probe_wakeup(void *ignore, struct task_struct *p)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
diff --git a/localversion-rt b/localversion-rt
index b3e668a8fb94..9969a4b69fad 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt110
+-rt111
diff --git a/net/core/dev.c b/net/core/dev.c
index f7f91c97933b..b6fa0b2fd713 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6169,7 +6169,7 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
- if (rtnl_is_locked())
+ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
synchronize_rcu_expedited();
else
synchronize_rcu();