[105/115] sched: Remove irq time from available CPU power

From: Greg KH
Date: Tue Feb 15 2011 - 20:54:43 EST


2.6.32-longterm review patch. If anyone has any objections, please let us know.

------------------

Commit: aa483808516ca5cacfa0e5849691f64fec25828e upstream

The idea was suggested by Peter Zijlstra here:

http://marc.info/?l=linux-kernel&m=127476934517534&w=2

Time spent in irq handling is not actually available to the tasks running
on the CPU. This patch removes irq time from the available CPU power,
piggybacking on sched_rt_avg_update().
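
To make that concrete, here is a minimal stand-alone model (plain
userspace C, not the kernel code) of how the pieces fit together: the
irq time delta is fed into the same decaying average used for RT task
time (rq->rt_avg), and scale_rt_power() then reports how much of the
averaging period is left for fair tasks. The constants, tick pattern
and helper names below are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define SCHED_LOAD_SCALE	1024ULL		/* nominal CPU power */
#define AVG_PERIOD_NS		500000000ULL	/* stand-in for sched_avg_period() */

struct rq_model {
	uint64_t clock;			/* rq clock, ns */
	uint64_t age_stamp;		/* start of current averaging window */
	uint64_t rt_avg;		/* decayed irq (+ rt) time */
	uint64_t prev_irq_time;
};

/* models sched_avg_update(): halve rt_avg once per elapsed period */
static void avg_update(struct rq_model *rq)
{
	while (rq->clock - rq->age_stamp > AVG_PERIOD_NS) {
		rq->age_stamp += AVG_PERIOD_NS;
		rq->rt_avg /= 2;
	}
}

/* models sched_irq_time_avg_update(): account the irq time delta */
static void irq_time_avg_update(struct rq_model *rq, uint64_t curr_irq_time)
{
	rq->rt_avg += curr_irq_time - rq->prev_irq_time;
	rq->prev_irq_time = curr_irq_time;
	avg_update(rq);
}

/* models scale_rt_power(), including the underflow clamp added below */
static uint64_t scale_rt_power(const struct rq_model *rq)
{
	uint64_t total = AVG_PERIOD_NS + (rq->clock - rq->age_stamp);
	uint64_t available = rq->rt_avg > total ? 0 : total - rq->rt_avg;

	return SCHED_LOAD_SCALE * available / total;
}

int main(void)
{
	struct rq_model rq = { 0 };

	/* 75% of every 100ms tick spent in hard+soft irq context */
	for (int tick = 1; tick <= 20; tick++) {
		rq.clock = tick * 100000000ULL;
		irq_time_avg_update(&rq, rq.prev_irq_time + 75000000ULL);
		printf("t=%4dms power=%4llu/1024\n", tick * 100,
		       (unsigned long long)scale_rt_power(&rq));
	}
	return 0;
}

The reported power settles well below the nominal 1024, which is what
lets the load balancer treat the irq-busy CPU as a weaker one. Without
the clamp added in the scale_rt_power() hunk, a burst of irq time
larger than the period would make "available" underflow instead.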

Tested this on a 4-way system by keeping CPU X busy with a
network-intensive task whose irq processing (hard+soft) consumes 75% of
a single CPU, and then starting seven cycle soakers on the system.
Without this change there would be two tasks on each CPU; with this
change there is a single task on the irq-busy CPU X, and the remaining
seven tasks are spread across the other 3 CPUs.
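
The "cycle soakers" above are plain userspace busy loops; the commit
does not name a specific tool, so a minimal stand-in would be:

/* cycle soaker: burn 100% of one CPU so the balancer has load to place */
int main(void)
{
	volatile unsigned long n = 0;

	for (;;)
		n++;	/* volatile keeps the loop from being optimized out */
}

Seven background instances of this give the load balancer identical
CPU-bound tasks to spread across the machine.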

Signed-off-by: Venkatesh Pallipadi <venki@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@xxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Mike Galbraith <efault@xxxxxx>
Acked-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxx>
---
kernel/sched.c | 26 +++++++++++++++++++++++++-
kernel/sched_features.h | 5 +++++
2 files changed, 30 insertions(+), 1 deletion(-)

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,6 +553,10 @@ struct rq {
 	u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+
 	/* calc_load related fields */
 	unsigned long calc_load_update;
 	long calc_load_active;
@@ -622,6 +626,7 @@ static inline int cpu_of(struct rq *rq)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
 static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
@@ -632,6 +637,8 @@ inline void update_rq_clock(struct rq *r
 	irq_time = irq_time_cpu(cpu);
 	if (rq->clock - irq_time > rq->clock_task)
 		rq->clock_task = rq->clock - irq_time;
+
+	sched_irq_time_avg_update(rq, irq_time);
 }
 
 /*
@@ -1883,6 +1890,15 @@ void account_system_vtime(struct task_st
 	local_irq_restore(flags);
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+		rq->prev_irq_time = curr_irq_time;
+		sched_rt_avg_update(rq, delta_irq);
+	}
+}
+
 #else
 
 static u64 irq_time_cpu(int cpu)
@@ -1890,6 +1906,8 @@ static u64 irq_time_cpu(int cpu)
 	return 0;
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
 #endif
 
 #include "sched_stats.h"
@@ -3755,7 +3773,13 @@ unsigned long scale_rt_power(int cpu)
 	u64 total, available;
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
-	available = total - rq->rt_avg;
+
+	if (unlikely(total < rq->rt_avg)) {
+		/* Ensures that power won't end up being negative */
+		available = 0;
+	} else {
+		available = total - rq->rt_avg;
+	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
 		total = SCHED_LOAD_SCALE;
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
  * release the lock. Decreases scheduling overhead.
  */
 SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)

