[PATCH v6 06/40] arm64: kasan: Enable in-kernel MTE
From: Andrey Konovalov
Date: Thu Oct 29 2020 - 15:26:35 EST
From: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Hardware tag-based KASAN relies on the Memory Tagging Extension (MTE)
feature and requires it to be enabled.
When MTE is configured in synchronous mode, a tag check fault causes a
synchronous data abort at the faulting access.
Enable MTE in synchronous mode in EL1 to provide a more immediate way of
detecting tag check failures in the kernel.
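For illustration only (not part of this patch, and loosely modeled on the
kind of access lib/test_kasan.c exercises; the function name below is made
up): with CONFIG_KASAN_HW_TAGS and MTE in synchronous mode, the
out-of-bounds store below is reported at the exact faulting instruction.

#include <linux/slab.h>

static void oob_write_demo(void)
{
	char *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return;

	p[64] = 'x';	/* one byte past the object: tag mismatch, sync fault */

	kfree(p);
}
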
As part of this change, enable the match-all tag for EL1 to allow the
kernel to access user pages without faulting. This is required because
the kernel has no knowledge of the tags set by userspace in a page.
Note: for MTE, the TCF bit field in SCTLR_EL1 affects only EL1, in the
same way that TCF0 affects EL0.
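For reference, these are the relevant SCTLR_EL1 field encodings, quoted
from memory of arch/arm64/include/asm/sysreg.h for illustration only (the
header is authoritative):

/* SCTLR_EL1.TCF, bits [41:40]: tag check fault mode for EL1 */
#define SCTLR_ELx_TCF_SHIFT	40
#define SCTLR_ELx_TCF_NONE	(UL(0x0) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_SYNC	(UL(0x1) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYNC	(UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK	(UL(0x3) << SCTLR_ELx_TCF_SHIFT)

/* SCTLR_EL1.TCF0, bits [39:38]: the equivalent control for EL0 */
#define SCTLR_EL1_TCF0_SHIFT	38
#define SCTLR_EL1_TCF0_SYNC	(UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_MASK	(UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
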
MTE is built on top of the Top Byte Ignore (TBI) feature, hence TBI for
kernel addresses (TBI1) is enabled as part of this patch as well.
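For illustration only (the helper name below is made up): under
CONFIG_KASAN_HW_TAGS the slab allocator returns pointers that carry the
MTE tag in the top byte, which is why TBI1 must be set for these tagged
kernel addresses to translate at all.

#include <linux/slab.h>

static unsigned long ptr_tag_demo(void)
{
	void *p = kmalloc(32, GFP_KERNEL);
	/* The 4-bit MTE tag lives in bits 59:56 of the pointer. */
	unsigned long tag = p ? ((unsigned long)p >> 56) & 0xf : 0;

	/*
	 * With TBI1 set, the MMU ignores bits 63:56 of TTBR1 addresses for
	 * translation; MTE then compares this pointer tag against the
	 * allocation tag stored in memory.
	 */
	kfree(p);
	return tag;
}
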
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
---
Change-Id: I4d67497268bb7f0c2fc5dcacefa1e273df4af71d
---
arch/arm64/kernel/cpufeature.c | 7 +++++++
arch/arm64/mm/proc.S | 23 ++++++++++++++++++++---
2 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index dcc165b3fc04..c61f201042b2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1704,6 +1704,13 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
cleared_zero_page = true;
mte_clear_page_tags(lm_alias(empty_zero_page));
}
+
+ /* Enable in-kernel MTE only if KASAN_HW_TAGS is enabled */
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {
+ /* Enable MTE Sync Mode for EL1 */
+ sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+ isb();
+ }
}
#endif /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 23c326a06b2d..7c3304fb15d9 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -40,9 +40,15 @@
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_SW_FLAGS TCR_TBI1
#else
-#define TCR_KASAN_FLAGS 0
+#define TCR_KASAN_SW_FLAGS 0
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#else
+#define TCR_KASAN_HW_FLAGS 0
#endif
/*
@@ -427,6 +433,10 @@ SYM_FUNC_START(__cpu_setup)
*/
mov_q x5, MAIR_EL1_SET
#ifdef CONFIG_ARM64_MTE
+ mte_tcr .req x20
+
+ mov mte_tcr, #0
+
/*
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
* (ID_AA64PFR1_EL1[11:8] > 1).
@@ -447,6 +457,9 @@ SYM_FUNC_START(__cpu_setup)
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr
+
+ /* set the TCR_EL1 bits */
+ mov_q mte_tcr, TCR_KASAN_HW_FLAGS
1:
#endif
msr mair_el1, x5
@@ -456,7 +469,11 @@ SYM_FUNC_START(__cpu_setup)
*/
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
+#ifdef CONFIG_ARM64_MTE
+ orr x10, x10, mte_tcr
+ .unreq mte_tcr
+#endif
tcr_clear_errata_bits x10, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
--
2.29.1.341.ge80a0c044ae-goog