[PATCH v3 17/19] sched: Add migrate_disable() tracepoints
From: Peter Zijlstra
Date: Thu Oct 15 2020 - 07:10:12 EST
XXX write a tracer:
- 'migrate_disable() -> migrate_enable()' time in task_sched_runtime()
- 'migrate_pull -> sched-in' time in task_sched_runtime()
The first will give worst case for the second, which is the actual
interference experienced by the task due to migration constraints of
migrate_disable().
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
include/trace/events/sched.h | 12 ++++++++++++
kernel/sched/core.c | 4 ++++
kernel/sched/deadline.c | 1 +
kernel/sched/rt.c | 8 +++++++-
4 files changed, 24 insertions(+), 1 deletion(-)
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -646,6 +646,18 @@ DECLARE_TRACE(sched_update_nr_running_tp
TP_PROTO(struct rq *rq, int change),
TP_ARGS(rq, change));
+DECLARE_TRACE(sched_migrate_disable_tp,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+DECLARE_TRACE(sched_migrate_enable_tp,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+DECLARE_TRACE(sched_migrate_pull_tp,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1732,6 +1732,8 @@ void migrate_disable(void)
return;
}
+ trace_sched_migrate_disable_tp(p);
+
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
@@ -1764,6 +1766,8 @@ void migrate_enable(void)
p->migration_disabled = 0;
this_rq()->nr_pinned--;
preempt_enable();
+
+ trace_sched_migrate_enable_tp(p);
}
EXPORT_SYMBOL_GPL(migrate_enable);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2245,6 +2245,7 @@ static void pull_dl_task(struct rq *this
goto skip;
if (is_migration_disabled(p)) {
+ trace_sched_migrate_pull_tp(p);
push_task = get_push_task(src_rq);
} else {
deactivate_task(src_rq, p, 0);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1877,7 +1877,12 @@ static int push_rt_task(struct rq *rq, b
struct task_struct *push_task = NULL;
int cpu;
- if (!pull || rq->push_busy)
+ if (!pull)
+ return 0;
+
+ trace_sched_migrate_pull_tp(next_task);
+
+ if (rq->push_busy)
return 0;
cpu = find_lowest_rq(rq->curr);
@@ -2223,6 +2228,7 @@ static void pull_rt_task(struct rq *this
goto skip;
if (is_migration_disabled(p)) {
+ trace_sched_migrate_pull_tp(p);
push_task = get_push_task(src_rq);
} else {
deactivate_task(src_rq, p, 0);