[RFC PATCH v3 5/7] mm/asi: Switch ASI on task switch

From: Alexandre Chartre
Date: Wed Feb 26 2020 - 11:24:00 EST


When a task using ASI is scheduled out or back in, save or restore
the corresponding ASI state and update the per-CPU ASI session
accordingly.
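
For illustration, the save/restore logic amounts to a small state
machine over the per-CPU session and a per-task copy. The following
is a simplified, self-contained userspace model (the struct layout,
helper names and the main() driver are invented for the example and
are not part of the patch); the real implementation is in
asi_schedule_out()/asi_schedule_in() below:

#include <stdio.h>

/* Minimal stand-ins for the kernel structures used by the patch. */
struct asi { const char *name; };

struct asi_session {
	struct asi *asi;	/* ASI attached to the session, NULL if none */
	int idepth;		/* interrupt nesting depth, 0 = not interrupted */
};

struct task { struct asi_session asi_session; };

/* Model of the per-CPU ASI session (a single CPU in this sketch). */
static struct asi_session cpu_session;

static void model_schedule_out(struct task *task)
{
	if (!cpu_session.asi)
		return;

	if (!cpu_session.idepth) {
		/* Active session: exit it and remember it in the task. */
		task->asi_session.asi = cpu_session.asi;
		task->asi_session.idepth = 0;
		cpu_session.asi = NULL;	/* models asi_switch_to_kernel_cr3() */
	} else {
		/* Interrupted session: save its state and clear the CPU. */
		task->asi_session = cpu_session;
		cpu_session.asi = NULL;
		cpu_session.idepth = 0;
	}
}

static void model_schedule_in(struct task *task)
{
	if (!task->asi_session.asi)
		return;

	if (!task->asi_session.idepth) {
		/* Re-enter ASI directly (models asi_switch_to_asi_cr3()). */
		cpu_session.asi = task->asi_session.asi;
	} else {
		/* Restore the interrupted session as it was saved. */
		cpu_session = task->asi_session;
		task->asi_session.asi = NULL;
	}
}

int main(void)
{
	struct asi example_asi = { "example-asi" };
	struct task t = { { NULL, 0 } };

	/* Task runs with ASI, gets interrupted, then is scheduled out. */
	cpu_session.asi = &example_asi;
	cpu_session.idepth = 1;
	model_schedule_out(&t);
	printf("scheduled out: cpu asi=%p, task asi=%s, idepth=%d\n",
	       (void *)cpu_session.asi, t.asi_session.asi->name,
	       t.asi_session.idepth);

	/* Task is scheduled back in: the interrupted session is restored. */
	model_schedule_in(&t);
	printf("scheduled in:  cpu asi=%s, idepth=%d\n",
	       cpu_session.asi->name, cpu_session.idepth);
	return 0;
}

The kernel code below follows the same two cases, adding the CR3
switches and the irq-off protection that the model leaves out.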

Signed-off-by: Alexandre Chartre <alexandre.chartre@xxxxxxxxxx>
---
arch/x86/include/asm/asi.h | 3 ++
arch/x86/mm/asi.c | 67 ++++++++++++++++++++++++++++++++++++++++++++
include/linux/sched.h | 9 ++++++
kernel/sched/core.c | 8 +++++
4 files changed, 87 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/asi.h b/arch/x86/include/asm/asi.h
index d240954..a0733f1 100644
--- a/arch/x86/include/asm/asi.h
+++ b/arch/x86/include/asm/asi.h
@@ -102,6 +102,9 @@ struct asi {
unsigned long base_cr3; /* base ASI CR3 */
};

+void asi_schedule_out(struct task_struct *task);
+void asi_schedule_in(struct task_struct *task);
+
extern struct asi *asi_create(struct asi_type *type);
extern void asi_destroy(struct asi *asi);
extern void asi_set_pagetable(struct asi *asi, pgd_t *pagetable);
diff --git a/arch/x86/mm/asi.c b/arch/x86/mm/asi.c
index c91ba82..9955eb2 100644
--- a/arch/x86/mm/asi.c
+++ b/arch/x86/mm/asi.c
@@ -229,3 +229,70 @@ void asi_prepare_resume(void)

asi_switch_to_asi_cr3(asi_session->asi, ASI_SWITCH_ON_RESUME);
}
+
+void asi_schedule_out(struct task_struct *task)
+{
+	struct asi_session *asi_session;
+	unsigned long flags;
+	struct asi *asi;
+
+	asi = this_cpu_read(cpu_asi_session.asi);
+	if (!asi)
+		return;
+
+	/*
+	 * Save the ASI session.
+	 *
+	 * Exit the session if it hasn't been interrupted, otherwise
+	 * just save the session state.
+	 */
+	local_irq_save(flags);
+	if (!this_cpu_read(cpu_asi_session.idepth)) {
+		asi_switch_to_kernel_cr3(asi);
+		task->asi_session.asi = asi;
+		task->asi_session.idepth = 0;
+	} else {
+		asi_session = &get_cpu_var(cpu_asi_session);
+		task->asi_session = *asi_session;
+		asi_session->asi = NULL;
+		asi_session->idepth = 0;
+	}
+	local_irq_restore(flags);
+}
+
+void asi_schedule_in(struct task_struct *task)
+{
+	struct asi_session *asi_session;
+	unsigned long flags;
+	struct asi *asi;
+
+	asi = task->asi_session.asi;
+	if (!asi)
+		return;
+
+	/*
+	 * At this point, the CPU shouldn't be using ASI because the
+	 * ASI session is expected to be cleared in asi_schedule_out().
+	 */
+	WARN_ON(this_cpu_read(cpu_asi_session.asi));
+
+	/*
+	 * Restore ASI.
+	 *
+	 * If the task was scheduled out while using ASI, then the ASI
+	 * is already set up and we can immediately switch to the ASI
+	 * page table.
+	 *
+	 * Otherwise, if the task was scheduled out while ASI was
+	 * interrupted, just restore the ASI session.
+	 */
+	local_irq_save(flags);
+	if (!task->asi_session.idepth) {
+		asi_switch_to_asi_cr3(asi, ASI_SWITCH_NOW);
+	} else {
+		asi_session = &get_cpu_var(cpu_asi_session);
+		*asi_session = task->asi_session;
+		task->asi_session.asi = NULL;
+	}
+	local_irq_restore(flags);
+}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 716ad1d..66cc583 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,6 +10,7 @@
#include <uapi/linux/sched.h>

#include <asm/current.h>
+#include <asm/asi_session.h>

#include <linux/pid.h>
#include <linux/sem.h>
@@ -1281,6 +1282,14 @@ struct task_struct {
unsigned long prev_lowest_stack;
#endif

+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+	/*
+	 * ASI session is saved here when the task is scheduled out
+	 * while an ASI session was active or interrupted.
+	 */
+	struct asi_session asi_session;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 90e4b00..a2c8604 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -14,6 +14,7 @@

#include <asm/switch_to.h>
#include <asm/tlb.h>
+#include <asm/asi.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
@@ -3153,6 +3154,9 @@ static inline void finish_lock_switch(struct rq *rq)
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+	if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
+		asi_schedule_out(prev);
+
kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
@@ -3259,6 +3263,10 @@ static inline void finish_lock_switch(struct rq *rq)
}

tick_nohz_task_switch();
+
+	if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
+		asi_schedule_in(current);
+
return rq;
}

--
1.7.1