[PATCH 4/4] sched/fair: Add schedstat debug values for REBALANCE_AFFINITY

From: Jiri Olsa
Date: Mon Jun 20 2016 - 08:23:55 EST


Several statistics were helpful to watch while testing this feature.
Add the most useful ones: per-rq counters for tasks skipped because of
se.dont_balance and for tasks moved out of/into a runqueue by
rebalance_affinity(), plus matching per-task counters.
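
With schedstats enabled the new counters then show up in the usual
places; the output below is illustrative only (the field names are the
ones added here, the numbers and exact alignment are made up):

  $ grep -e nr_dont_balance -e nr_affinity /proc/sched_debug
    .nr_dont_balance               : 42
    .nr_affinity_out               : 3
    .nr_affinity_in                : 3

  $ grep -e nr_dont_balance -e nr_balanced_affinity /proc/$$/sched
  se.statistics.nr_dont_balance              :                    7
  se.statistics.nr_balanced_affinity         :                    2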

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
include/linux/sched.h | 2 ++
kernel/sched/debug.c | 5 +++++
kernel/sched/fair.c | 15 ++++++++++++++-
kernel/sched/sched.h | 5 +++++
4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0e6ac882283b..4b772820436b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1305,6 +1305,8 @@ struct sched_statistics {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
+ u64 nr_dont_balance;
+ u64 nr_balanced_affinity;

u64 nr_wakeups;
u64 nr_wakeups_sync;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2a0a9995256d..5947558dab65 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -635,6 +635,9 @@ do { \
P(sched_goidle);
P(ttwu_count);
P(ttwu_local);
+ P(nr_dont_balance);
+ P(nr_affinity_out);
+ P(nr_affinity_in);
}

#undef P
@@ -912,6 +915,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.statistics.nr_wakeups_affine_attempts);
P(se.statistics.nr_wakeups_passive);
P(se.statistics.nr_wakeups_idle);
+ P(se.statistics.nr_dont_balance);
+ P(se.statistics.nr_balanced_affinity);

avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 736e525e189c..a4f1ed403f1e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -902,7 +902,15 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)

static bool dont_balance(struct task_struct *p)
{
- return sched_feat(REBALANCE_AFFINITY) && p->se.dont_balance;
+ bool dont_balance = false;
+
+ if (sched_feat(REBALANCE_AFFINITY) && p->se.dont_balance) {
+ dont_balance = true;
+ schedstat_inc(task_rq(p), nr_dont_balance);
+ schedstat_inc(p, se.statistics.nr_dont_balance);
+ }
+
+ return dont_balance;
}

/**************************************************
@@ -7903,6 +7911,7 @@ static void rebalance_affinity(struct rq *rq)
if (cpu >= nr_cpu_ids)
continue;

+ schedstat_inc(rq, nr_affinity_out);
__detach_task(p, rq, cpu);
raw_spin_unlock(&rq->lock);

@@ -7911,6 +7920,10 @@ static void rebalance_affinity(struct rq *rq)
raw_spin_lock(&dst_rq->lock);
attach_task(dst_rq, p);
p->se.dont_balance = true;
+
+ schedstat_inc(p, se.statistics.nr_balanced_affinity);
+ schedstat_inc(dst_rq, nr_affinity_in);
+
raw_spin_unlock(&dst_rq->lock);

local_irq_restore(flags);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d1a6224cd140..f086e233f7e6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -701,6 +701,11 @@ struct rq {
/* try_to_wake_up() stats */
unsigned int ttwu_count;
unsigned int ttwu_local;
+
+ /* rebalance_affinity() stats */
+ unsigned int nr_dont_balance;
+ unsigned int nr_affinity_out;
+ unsigned int nr_affinity_in;
#endif

#ifdef CONFIG_SMP
--
2.4.11