[PATCH 4.14 093/109] ARM: spectre-v2: add firmware based hardening
From: Greg Kroah-Hartman
Date: Tue Oct 16 2018 - 13:21:25 EST
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Russell King <rmk+kernel@xxxxxxxxxxxxxxx>
Commit 10115105cb3aa17b5da1cb726ae8dd5f6854bd93 upstream.
Add firmware based hardening for cores that require more complex
handling in firmware.
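
On such cores (Cortex-A57/A72, and any non-ARM implementer that falls through the default case), the BPIALL/ICIALLU sequences are not sufficient, so the branch predictor is invalidated by calling ARM_SMCCC_ARCH_WORKAROUND_1 in firmware through the PSCI conduit (HVC or SMC). A condensed sketch of the C-side detection this patch adds, reduced to the HVC conduit for brevity (the hunk below also handles the SMC conduit and the Cortex-A57/A72 case labels):

	struct arm_smccc_res res;

	/* The workaround call requires SMCCC 1.1 or later */
	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		break;

	/* Ask firmware whether ARM_SMCCC_ARCH_WORKAROUND_1 is implemented */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	if ((int)res.a0 != 0)
		break;

	/* From now on, invalidate the predictor via firmware */
	per_cpu(harden_branch_predictor_fn, cpu) = call_hvc_arch_workaround_1;
	processor.switch_mm = cpu_v7_hvc_switch_mm;
	spectre_v2_method = "hypervisor";

On the assembly side, cpu_v7_smc_switch_mm/cpu_v7_hvc_switch_mm save r0-r3, issue the firmware call, restore the registers and then tail-call the regular cpu_v7_switch_mm.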
Signed-off-by: Russell King <rmk+kernel@xxxxxxxxxxxxxxx>
Boot-tested-by: Tony Lindgren <tony@xxxxxxxxxxx>
Reviewed-by: Tony Lindgren <tony@xxxxxxxxxxx>
Reviewed-by: Marc Zyngier <marc.zyngier@xxxxxxx>
Signed-off-by: David A. Long <dave.long@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/arm/mm/proc-v7-bugs.c | 60 +++++++++++++++++++++++++++++++++++++++++++++
arch/arm/mm/proc-v7.S | 21 +++++++++++++++
2 files changed, 81 insertions(+)
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -1,14 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/arm-smccc.h>
#include <linux/kernel.h>
+#include <linux/psci.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
+#include <asm/proc-fns.h>
#include <asm/system_misc.h>
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+
static void harden_branch_predictor_bpiall(void)
{
write_sysreg(0, BPIALL);
@@ -19,6 +25,16 @@ static void harden_branch_predictor_icia
write_sysreg(0, ICIALLU);
}
+static void __maybe_unused call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void __maybe_unused call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
static void cpu_v7_spectre_init(void)
{
const char *spectre_v2_method = NULL;
@@ -45,7 +61,51 @@ static void cpu_v7_spectre_init(void)
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
break;
+
+#ifdef CONFIG_ARM_PSCI
+ default:
+ /* Other ARM CPUs require no workaround */
+ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+ break;
+ /* fallthrough */
+ /* Cortex A57/A72 require firmware workaround */
+ case ARM_CPU_PART_CORTEX_A57:
+ case ARM_CPU_PART_CORTEX_A72: {
+ struct arm_smccc_res res;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ break;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_hvc_arch_workaround_1;
+ processor.switch_mm = cpu_v7_hvc_switch_mm;
+ spectre_v2_method = "hypervisor";
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_smc_arch_workaround_1;
+ processor.switch_mm = cpu_v7_smc_switch_mm;
+ spectre_v2_method = "firmware";
+ break;
+
+ default:
+ break;
+ }
}
+#endif
+ }
+
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,6 +9,7 @@
*
* This is the "shell" of the ARMv7 processor support.
*/
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
@@ -93,6 +94,26 @@ ENTRY(cpu_v7_dcache_clean_area)
ret lr
ENDPROC(cpu_v7_dcache_clean_area)
+#ifdef CONFIG_ARM_PSCI
+ .arch_extension sec
+ENTRY(cpu_v7_smc_switch_mm)
+ stmfd sp!, {r0 - r3}
+ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+ smc #0
+ ldmfd sp!, {r0 - r3}
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_smc_switch_mm)
+ .arch_extension virt
+ENTRY(cpu_v7_hvc_switch_mm)
+ stmfd sp!, {r0 - r3}
+ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+ hvc #0
+ ldmfd sp!, {r0 - r3}
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_hvc_switch_mm)
+#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU