[PATCH v5 05/40] arm64: mte: Add in-kernel tag fault handler
From: Andrey Konovalov
Date: Mon Oct 12 2020 - 16:45:14 EST
From: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>

Add the implementation of the in-kernel fault handler.

When a tag fault happens on a kernel address:
* MTE is disabled on the current CPU,
* the execution continues.

When a tag fault happens on a user address:
* the kernel executes do_bad_area() and panics.

The tag fault handler for kernel addresses is currently empty and will be
filled in by a future commit.
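
In rough outline (a simplified sketch of the logic this patch adds, with
do_tag_recovery() inlined; the real implementation is in the fault.c hunk
below), the handler behaves as follows:

	static int do_tag_check_fault(unsigned long addr, unsigned int esr,
				      struct pt_regs *regs)
	{
		if (is_ttbr0_addr(addr)) {
			/* User (TTBR0) address: report it as a bad area. */
			do_bad_area(addr, esr, regs);
		} else {
			/*
			 * Kernel address: report the fault once, then disable
			 * tag checking on this CPU and let execution continue.
			 */
			report_tag_fault(addr, esr, regs);
			sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK,
					 SCTLR_ELx_TCF_NONE);
			isb();
		}

		return 0;
	}
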
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Co-developed-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
---
Change-Id: I9b8aa79567f7c45f4d6a1290efcf34567e620717
---
arch/arm64/include/asm/uaccess.h | 23 +++++++++++++++++++
arch/arm64/mm/fault.c | 38 +++++++++++++++++++++++++++++++-
2 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 991dd5f031e4..c7fff8daf2a7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -200,13 +200,36 @@ do { \
CONFIG_ARM64_PAN)); \
} while (0)
+/*
+ * The Tag Check Fault (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel usually accesses TTBR0 with LDTR/STTR instructions
+ * when UAO is available, so these act as EL0 accesses using
+ * TCF0.
+ * However, the futex.h code uses exclusives, which are executed
+ * at EL1; this can cause a tag check fault even if the user
+ * disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag Check Override (TCO) bit temporarily disables tag checking,
+ * preventing the issue.
+ */
static inline void uaccess_disable(void)
{
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
__uaccess_disable(ARM64_HAS_PAN);
}
static inline void uaccess_enable(void)
{
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
__uaccess_enable(ARM64_HAS_PAN);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a3bd189602df..d110f382dacf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -33,6 +33,7 @@
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
@@ -294,6 +295,11 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
do_exit(SIGKILL);
}
+static void report_tag_fault(unsigned long addr, unsigned int esr,
+			     struct pt_regs *regs)
+{
+}
+
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -641,10 +647,40 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 0;
}
+static void do_tag_recovery(unsigned long addr, unsigned int esr,
+			    struct pt_regs *regs)
+{
+	static bool reported = false;
+
+	if (!READ_ONCE(reported)) {
+		report_tag_fault(addr, esr, regs);
+		WRITE_ONCE(reported, true);
+	}
+
+	/*
+	 * Disable MTE Tag Checking on the local CPU for the current EL.
+	 * It will be done lazily on the other CPUs when they hit a
+	 * tag fault.
+	 */
+	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
+	isb();
+}
+
static int do_tag_check_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
-	do_bad_area(addr, esr, regs);
+	/*
+	 * The tag check fault (TCF) is per EL, hence TCF0 affects
+	 * EL0 and TCF affects EL1.
+	 * TTBR0 addresses belong by convention to EL0; to discriminate
+	 * correctly we use the is_ttbr0_addr() macro.
+	 */
+	if (is_ttbr0_addr(addr))
+		do_bad_area(addr, esr, regs);
+	else
+		do_tag_recovery(addr, esr, regs);
+
return 0;
}
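
As an aside, the effect of the uaccess.h hunk above on a user-access
sequence is roughly the following (a simplified sketch; PSTATE.TCO is the
architectural Tag Check Override bit, and the helpers are the existing
arm64 ones modified by this patch):

	uaccess_enable();	/* PSTATE.TCO set: tag checks suppressed  */
	/* LDTR/STTR or exclusive-based accesses to the user address */
	uaccess_disable();	/* PSTATE.TCO cleared: tag checks resume  */
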
--
2.28.0.1011.ga647a8990f-goog