[v2 PATCH 4/4] timers: logic to enable timer migration.

From: Arun R Bharadwaj
Date: Wed Mar 04 2009 - 07:19:56 EST


* Arun R Bharadwaj <arun@xxxxxxxxxxxxxxxxxx> [2009-03-04 17:42:49]:

This patch migrates all non-pinned timers and hrtimers from idle CPUs to
the CPU currently nominated as the idle load balancer. Timers queued on
busy CPUs are not migrated.

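To make the intent concrete, the added code boils down to one decision
about which per-CPU base a timer is queued on. The standalone sketch
below models that decision in plain C; pick_timer_cpu() and its flag
arguments are illustrative stand-ins for enable_timer_migration (the
sysctl from earlier in this series), the pinned state, idle_cpu() and
get_nohz_load_balancer() used in the hunks that follow, not kernel code.

#include <stdio.h>

/* stand-in for the enable_timer_migration sysctl */
static int enable_timer_migration = 1;

/*
 * Model of the decision added to __mod_timer()/switch_hrtimer_base():
 * a non-pinned timer armed from an idle CPU is queued on the CPU that
 * currently owns idle load balancing; otherwise it stays local.
 */
static int pick_timer_cpu(int current_cpu, int pinned, int cpu_is_idle,
			  int nohz_balancer_cpu)
{
	if (enable_timer_migration && !pinned && cpu_is_idle &&
	    nohz_balancer_cpu != -1)
		return nohz_balancer_cpu;	/* migrate to the ilb owner */
	return current_cpu;			/* default: local CPU */
}

int main(void)
{
	printf("%d\n", pick_timer_cpu(2, 0, 1, 0));	/* idle CPU 2  -> 0 */
	printf("%d\n", pick_timer_cpu(2, 0, 0, 0));	/* busy CPU 2  -> 2 */
	printf("%d\n", pick_timer_cpu(2, 1, 1, 0));	/* pinned      -> 2 */
	return 0;
}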

Signed-off-by: Arun R Bharadwaj <arun@xxxxxxxxxxxxxxxxxx>
---
 include/linux/sched.h |    1 +
 kernel/hrtimer.c      |   12 +++++++++++-
 kernel/sched.c        |    5 +++++
 kernel/timer.c        |   13 ++++++++++++-
 4 files changed, 29 insertions(+), 2 deletions(-)

Index: linux.trees.git/kernel/timer.c
===================================================================
--- linux.trees.git.orig/kernel/timer.c
+++ linux.trees.git/kernel/timer.c
@@ -38,6 +38,7 @@
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/timer.h>
+#include <linux/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -628,7 +629,7 @@ __mod_timer(struct timer_list *timer, un
{
struct tvec_base *base, *new_base;
unsigned long flags;
- int ret;
+ int ret, current_cpu, preferred_cpu;

ret = 0;

@@ -649,6 +650,16 @@ __mod_timer(struct timer_list *timer, un

new_base = __get_cpu_var(tvec_bases);

+ current_cpu = smp_processor_id();
+ preferred_cpu = get_nohz_load_balancer();
+ if (enable_timer_migration && !tbase_get_pinned(timer->base) &&
+ idle_cpu(current_cpu) && preferred_cpu != -1) {
+ new_base = per_cpu(tvec_bases, preferred_cpu);
+ timer_set_base(timer, new_base);
+ timer->expires = expires;
+ internal_add_timer(new_base, timer);
+ goto out_unlock;
+ }
if (base != new_base) {
/*
* We are trying to schedule the timer on the local CPU.
Index: linux.trees.git/kernel/hrtimer.c
===================================================================
--- linux.trees.git.orig/kernel/hrtimer.c
+++ linux.trees.git/kernel/hrtimer.c
@@ -43,6 +43,8 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
+#include <linux/sched.h>
+#include <linux/timer.h>

#include <asm/uaccess.h>

@@ -198,8 +200,16 @@ int pinned)
{
struct hrtimer_clock_base *new_base;
struct hrtimer_cpu_base *new_cpu_base;
+ int current_cpu, preferred_cpu;
+
+ current_cpu = smp_processor_id();
+ preferred_cpu = get_nohz_load_balancer();
+ if (enable_timer_migration && !pinned && preferred_cpu != -1 &&
+ idle_cpu(current_cpu))
+ new_cpu_base = &per_cpu(hrtimer_bases, preferred_cpu);
+ else
+ new_cpu_base = &__get_cpu_var(hrtimer_bases);

- new_cpu_base = &__get_cpu_var(hrtimer_bases);
new_base = &new_cpu_base->clock_base[base->index];

if (base != new_base) {
Index: linux.trees.git/include/linux/sched.h
===================================================================
--- linux.trees.git.orig/include/linux/sched.h
+++ linux.trees.git/include/linux/sched.h
@@ -265,6 +265,7 @@ static inline int select_nohz_load_balan
}
#endif

+extern int get_nohz_load_balancer(void);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
Index: linux.trees.git/kernel/sched.c
===================================================================
--- linux.trees.git.orig/kernel/sched.c
+++ linux.trees.git/kernel/sched.c
@@ -4009,6 +4009,11 @@ static struct {
.load_balancer = ATOMIC_INIT(-1),
};

+inline int get_nohz_load_balancer(void)
+{
+ return atomic_read(&nohz.load_balancer);
+}
+
/*
* This routine will try to nominate the ilb (idle load balancing)
* owner among the cpus whose ticks are stopped. ilb owner will do the idle
--