[PATCH 5/7] sched: Add sched_load_se tracepoint
From: Qais Yousef
Date: Sun May 05 2019 - 07:59:25 EST
The new tracepoint allows tracking PELT signals at the sched_entity level, which is supported for CFS tasks and task groups only.
Signed-off-by: Qais Yousef <qais.yousef@xxxxxxx>
---
include/trace/events/sched.h | 4 ++++
kernel/sched/fair.c | 1 +
kernel/sched/pelt.c | 2 ++
kernel/sched/sched_tracepoints.h | 13 +++++++++++++
4 files changed, 20 insertions(+)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 2be4c471c6e9..0933c08cfc7e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -596,6 +596,10 @@ DECLARE_TRACE(sched_load_rq,
TP_PROTO(int cpu, const char *path, struct sched_avg *avg),
TP_ARGS(cpu, path, avg));
+DECLARE_TRACE(sched_load_se,
+ TP_PROTO(int cpu, const char *path, struct sched_entity *se),
+ TP_ARGS(cpu, path, se));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e1e0cc7db7f6..3fd306079b57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3139,6 +3139,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
sched_tp_load_cfs_rq(cfs_rq);
+ sched_tp_load_se(se);
return 1;
}
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 302affb14302..74e7bd121324 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -266,6 +266,7 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ sched_tp_load_se(se);
return 1;
}
@@ -279,6 +280,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
cfs_se_util_change(&se->avg);
+ sched_tp_load_se(se);
return 1;
}
diff --git a/kernel/sched/sched_tracepoints.h b/kernel/sched/sched_tracepoints.h
index f4ded705118e..4a53578c9a69 100644
--- a/kernel/sched/sched_tracepoints.h
+++ b/kernel/sched/sched_tracepoints.h
@@ -37,3 +37,16 @@ static __always_inline void sched_tp_load_dl_rq(struct rq *rq)
trace_sched_load_rq(cpu, NULL, &rq->avg_dl);
}
}
+
+static __always_inline void sched_tp_load_se(struct sched_entity *se)
+{
+ if (trace_sched_load_se_enabled()) {
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ char path[SCHED_TP_PATH_LEN];
+ int cpu = cpu_of(rq_of(cfs_rq));
+
+ cfs_rq_tg_path(gcfs_rq, path, SCHED_TP_PATH_LEN);
+ trace_sched_load_se(cpu, path, se);
+ }
+}
--
2.17.1