Re: [PATCH 4/4] futex: Add FUTEX_LOCK with optional adaptive spinning

From: Darren Hart
Date: Fri May 07 2010 - 15:06:22 EST


Thomas Gleixner wrote:
> On Fri, 7 May 2010, Peter Zijlstra wrote:
>
>> On Fri, 2010-05-07 at 18:30 +0200, Thomas Gleixner wrote:
>>>> Please keep the code as near mutex_spin_on_owner() as possible.
>>> There is no reason why we can't make that unconditional.
>>>
>> Sure, but lets do that in a separate series.
>
> Sure. I'm not touching mutex_spin_on_owner() now. It's just for
> testing now.
>
> Thanks,
>
> tglx


One bug in the hunk quoted below; see my patch underneath for the fix.


> ---
> Index: linux-2.6-tip/kernel/sched.c
> ===================================================================
> --- linux-2.6-tip.orig/kernel/sched.c
> +++ linux-2.6-tip/kernel/sched.c
> @@ -841,6 +841,10 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
>
>  static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
>  {
> +#ifdef CONFIG_SMP
> +        next->oncpu = 1;
> +        prev->oncpu = 0;

There is no prev in scope here (prepare_lock_switch() is only passed next), so I moved the clearing of oncpu to finish_lock_switch(), which does receive prev:

How's this?

Signed-off-by: Darren Hart <dvhltc@xxxxxxxxxx>
---
 include/linux/sched.h |    2 --
 kernel/sched.c        |   10 ++++++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 885d659..3fb8a45 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1178,10 +1178,8 @@ struct task_struct {
         int lock_depth;         /* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
         int oncpu;
 #endif
-#endif
 
         int prio, static_prio, normal_prio;
         unsigned int rt_priority;
diff --git a/kernel/sched.c b/kernel/sched.c
index 20b8d99..9915bdf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -841,10 +841,16 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+        next->oncpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+        prev->oncpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
         /* this is a valid case when another task releases the spinlock */
         rq->lock.owner = current;
@@ -2628,7 +2634,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
         if (likely(sched_info_on()))
                 memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
         p->oncpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
@@ -5316,7 +5322,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         __set_task_cpu(idle, cpu);
 
         rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
         idle->oncpu = 1;
 #endif
         raw_spin_unlock_irqrestore(&rq->lock, flags);
--
1.6.3.3
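
For context, a minimal sketch (illustrative only, not part of the patch
above) of how an adaptive-spin loop could consume the now-unconditional
oncpu flag, in the style of mutex_spin_on_owner(). The helper name
spin_on_owner() and the owner_p argument are made up for the example:

/*
 * Spin while *owner_p still points at the task we observed holding
 * the lock and that task is still running on a CPU (p->oncpu).
 *
 * Returns 1 if ownership changed (caller should retry the atomic
 * acquisition), 0 if the owner scheduled out or we should yield
 * (caller should block instead).
 */
static int spin_on_owner(struct task_struct **owner_p,
                         struct task_struct *owner)
{
        for (;;) {
                if (*owner_p != owner)
                        return 1;
                if (!owner->oncpu || need_resched())
                        return 0;
                cpu_relax();
        }
}

A caller would retry the lock acquisition when this returns 1 and fall
back to blocking when it returns 0.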


--
Darren Hart
IBM Linux Technology Center
Real-Time Linux Team