[patch 06/10] sched: Add task components for migration control

From: Thomas Gleixner
Date: Thu Sep 17 2020 - 06:51:39 EST


The upcoming RT migrate_enable/disable() support will track the
migrate-disabled state in task_struct.

Add a new struct task_migration_ctrl to hold all necessary information,
embed it in task_struct as migration_ctrl, and add the required
initializers.
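
For illustration, the sketch below shows one way the new state could be
queried once the actual migrate_enable/disable() implementation lands in
a later patch of this series. task_migration_disabled() is a
hypothetical helper and not part of this patch:

/*
 * Hypothetical helper, for illustration only: reports whether @p is
 * currently inside a migrate disabled section.  disable_cnt counts the
 * nesting depth of migrate_disable() calls; non-zero means the task
 * must not be migrated to another CPU right now.
 */
static inline bool task_migration_disabled(struct task_struct *p)
{
#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
	return p->migration_ctrl.disable_cnt != 0;
#else
	return false;
#endif
}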

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 include/linux/sched.h |   30 +++++++++++++++++++++++++++---
 init/init_task.c      |    3 +++
 kernel/fork.c         |    1 +
 kernel/sched/debug.c  |    4 ++++
 4 files changed, 35 insertions(+), 3 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -628,6 +628,20 @@ struct wake_q_node {
 	struct wake_q_node *next;
 };
 
+#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
+struct task_migration_ctrl {
+	struct mutex		mutex;
+	int			disable_cnt;
+};
+
+#define INIT_TASK_MIGRATION_CTRL_INITIALIZER				\
+{									\
+	.mutex = __MUTEX_INITIALIZER(init_task.migration_ctrl.mutex),	\
+}
+#else /* CONFIG_PREEMPT_RT && CONFIG_SMP */
+struct task_migration_ctrl { };
+#endif /* !(CONFIG_PREEMPT_RT && CONFIG_SMP) */
+
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -713,6 +727,7 @@ struct task_struct {
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
 	cpumask_t			cpus_mask;
+	struct task_migration_ctrl	migration_ctrl;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
@@ -1865,7 +1880,7 @@ static __always_inline bool need_resched
 }
 
 /*
- * Wrappers for p->thread_info->cpu access. No-op on UP.
+ * Various SMP helper functions. No-ops on UP.
  */
 #ifdef CONFIG_SMP

@@ -1880,7 +1895,14 @@ static inline unsigned int task_cpu(cons

 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
-#else
+static inline void task_migration_ctrl_init(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT
+	mutex_init(&p->migration_ctrl.mutex);
+#endif
+}
+
+#else /* CONFIG_SMP */
 
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
@@ -1891,7 +1913,9 @@ static inline void set_task_cpu(struct t
 {
 }
 
-#endif /* CONFIG_SMP */
+static inline void task_migration_ctrl_init(struct task_struct *p) { }
+
+#endif /* !CONFIG_SMP */
 
 /*
  * In order to reduce various lock holder preemption latencies provide an
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -209,6 +209,9 @@ struct task_struct init_task
 #ifdef CONFIG_SECCOMP
 	.seccomp	= { .filter_count = ATOMIC_INIT(0) },
 #endif
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+	.migration_ctrl	= INIT_TASK_MIGRATION_CTRL_INITIALIZER,
+#endif
 };
 EXPORT_SYMBOL(init_task);
 
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2119,6 +2119,7 @@ static __latent_entropy struct task_stru
 #ifdef CONFIG_BLOCK
 	p->plug = NULL;
 #endif
+	task_migration_ctrl_init(p);
 	futex_init_task(p);
 
 	/*
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -958,6 +958,10 @@ void proc_sched_show_task(struct task_st
 		P(dl.runtime);
 		P(dl.deadline);
 	}
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+	P(migration_ctrl.disable_cnt);
+	P(nr_cpus_allowed);
+#endif
 #undef PN_SCHEDSTAT
 #undef P_SCHEDSTAT