[PATCH v3 3/6] sched: Add new tracepoints to track pelt at rq level

From: Qais Yousef
Date: Tue Jun 04 2019 - 07:19:12 EST


The new tracepoints allow tracking PELT signals at the rq level for all
scheduling classes and for irq.

Signed-off-by: Qais Yousef <qais.yousef@xxxxxxx>
---
 include/trace/events/sched.h | 23 +++++++++++++++++++++++
 kernel/sched/fair.c          |  6 ++++++
 kernel/sched/pelt.c          |  9 ++++++++-
 3 files changed, 37 insertions(+), 1 deletion(-)
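
Notes (not part of the changelog): these are bare tracepoints, declared
with DECLARE_TRACE() and no TRACE_EVENT(), so nothing shows up in tracefs
and no new ABI is exposed; a consumer hooks them by registering a probe
in code. Below is a minimal sketch of such a probe module for
pelt_cfs_tp. It assumes the tracepoint symbols are exported to modules
(this patch by itself does not do that), and the probe/module names are
purely illustrative:

#include <linux/module.h>
#include <trace/events/sched.h>

/*
 * The first argument is the private data pointer passed at
 * registration; the rest matches the tracepoint's TP_PROTO.
 */
static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	/* Inspect or export the cfs_rq PELT signals here. */
}

static int __init pelt_probe_init(void)
{
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
	tracepoint_synchronize_unregister();
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");

Note that struct cfs_rq stays an incomplete type from a module's point
of view, which is fine for receiving the pointer but means dereferencing
it needs the scheduler's private headers or dedicated accessors.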

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c8c7c7efb487..520b89d384ec 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -594,6 +594,29 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+/*
+ * The following tracepoints are not exported in tracefs and provide
+ * hooking mechanisms only for testing and debugging purposes.
+ *
+ * Postfixed with _tp to make them easily identifiable in the code.
+ */
+DECLARE_TRACE(pelt_cfs_tp,
+	TP_PROTO(struct cfs_rq *cfs_rq),
+	TP_ARGS(cfs_rq));
+
+DECLARE_TRACE(pelt_rt_tp,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
+DECLARE_TRACE(pelt_dl_tp,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
+DECLARE_TRACE(pelt_irq_tp,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18c89ebadfc7..dee1338ec4a9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3353,6 +3353,8 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
 
+	trace_pelt_cfs_tp(cfs_rq);
+
 	return 1;
 }
 
@@ -3505,6 +3507,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
 	cfs_rq_util_change(cfs_rq, flags);
+
+	trace_pelt_cfs_tp(cfs_rq);
 }
 
 /**
@@ -3524,6 +3528,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
 	cfs_rq_util_change(cfs_rq, 0);
+
+	trace_pelt_cfs_tp(cfs_rq);
 }
 
 /*
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index befce29bd882..c9d4945861a4 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -28,6 +28,8 @@
 #include "sched.h"
 #include "pelt.h"
 
+#include <trace/events/sched.h>
+
 /*
  * Approximate:
  * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@@ -292,6 +294,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1, 1);
+		trace_pelt_cfs_tp(cfs_rq);
 		return 1;
 	}
 
@@ -317,6 +320,7 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 				running)) {
 
 		___update_load_avg(&rq->avg_rt, 1, 1);
+		trace_pelt_rt_tp(rq);
 		return 1;
 	}
 
@@ -340,6 +344,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 				running)) {
 
 		___update_load_avg(&rq->avg_dl, 1, 1);
+		trace_pelt_dl_tp(rq);
 		return 1;
 	}
 
@@ -388,8 +393,10 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 				1,
 				1);
 
-	if (ret)
+	if (ret) {
 		___update_load_avg(&rq->avg_irq, 1, 1);
+		trace_pelt_irq_tp(rq);
+	}
 
 	return ret;
 }
--
2.17.1