[PATCH] arm64: Force SSBS on context switch
From: Marc Zyngier
Date: Tue Jun 04 2019 - 12:35:18 EST
On a CPU that doesn't support SSBS, PSTATE[12] is RES0, so the bit is
not preserved in the task's saved PSTATE. In a system where only some
of the CPUs implement SSBS, we end up losing track of the SSBS bit
across task migration.
To address this issue, let's force the SSBS bit on context switch.
Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
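Not for the commit log: below is a rough userspace sketch of how the lost
bit can be observed, in case it helps with review. It assumes a
heterogeneous system where only some CPUs implement SSBS, that the SSBD
mitigation is left at its default (so the kernel sets SSBS for the task),
and it uses placeholder CPU numbers (SSBS_CPU / NO_SSBS_CPU) that need
adjusting for the machine at hand. S3_3_C4_C2_6 is the raw encoding of the
SSBS special register, which must only be accessed on a CPU that has it.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define SSBS_CPU	0	/* placeholder: a core that implements SSBS */
#define NO_SSBS_CPU	4	/* placeholder: a core that does not */

static void pin_to(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		exit(1);
	}
}

static unsigned long read_ssbs(void)
{
	unsigned long val;

	/* MRS Xt, SSBS -- only valid on a CPU that implements SSBS */
	asm volatile("mrs %0, S3_3_C4_C2_6" : "=r" (val));
	return val;
}

int main(void)
{
	pin_to(SSBS_CPU);
	printf("SSBS before migration: %#lx\n", read_ssbs());

	/* bounce through a CPU where PSTATE[12] is RES0, then come back */
	pin_to(NO_SSBS_CPU);
	pin_to(SSBS_CPU);

	/* without this patch, the bit can come back as 0 */
	printf("SSBS after migration:  %#lx\n", read_ssbs());
	return 0;
}

With this patch applied, both reads should report PSTATE[12] set (0x1000),
provided the task has not opted into SSBD via prctl().
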
arch/arm64/include/asm/processor.h | 14 ++++++++++++--
arch/arm64/kernel/process.c        | 14 ++++++++++++++
2 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = PSR_MODE_EL0t;
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(regs);
regs->sp = sp;
}
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
#endif
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_AA32_SSBS_BIT;
+ set_compat_ssbs_bit(regs);
regs->compat_sp = sp;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9856395ccdb7..d451b3b248cf 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -442,6 +442,19 @@ void uao_thread_switch(struct task_struct *next)
}
}
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+ !test_tsk_thread_flag(next, TIF_SSBD)) {
+ struct pt_regs *regs = task_pt_regs(next);
+
+ if (compat_user_mode(regs))
+ set_compat_ssbs_bit(regs);
+ else if (user_mode(regs))
+ set_ssbs_bit(regs);
+ }
+}
+
/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +484,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
--
2.20.1
--
Jazz is not dead. It just smells funny...