[PATCH 4/5] arm64: mte: Add asymmetric mode support

From: Vincenzo Frascino
Date: Mon Sep 13 2021 - 04:14:58 EST


MTE provides an asymmetric mode for detecting tag exceptions. In
particular, when such a mode is present, the CPU triggers a
synchronous fault on a tag mismatch during a load operation and
asynchronously updates a register when a tag mismatch is detected
during a store operation.
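
For reference, the asynchronously updated register is TFSR_ELx: a
store-side mismatch sets TFSR_EL1.TF1 and is reported later through
the existing check behind arch_force_async_tag_fault(). The sketch
below (not part of this patch) paraphrases how that existing check
consumes the register; the helper and constant names are assumptions
based on the current async-mode support:

	/*
	 * Sketch only: how the kernel consumes the asynchronously
	 * updated TFSR_EL1 on kernel exit paths. Names are assumed
	 * from the existing async-mode code, not introduced here.
	 */
	static void report_async_tag_fault_sketch(void)
	{
		u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

		if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
			write_sysreg_s(0, SYS_TFSR_EL1);	/* clear the sticky fault bit */
			kasan_report_async();			/* let KASAN report it */
		}
	}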

Add support for MTE asymmetric mode.

Note: if the CPU does not support MTE asymmetric mode, the kernel falls
back to synchronous mode, which is the default for kasan=on.
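
As an illustration, a KASAN HW_TAGS caller could then select the new
mode through the arch_enable_tagging_asymm() hook added below. The
kasan.mode plumbing shown here (KASAN_ARG_MODE_* / hw_enable_tagging_*
for the asymmetric case) is only a sketch of how a follow-up patch
might wire it up and is not part of this patch:

	/* Illustrative only: mode selection in the KASAN HW_TAGS init path. */
	switch (kasan_arg_mode) {
	case KASAN_ARG_MODE_ASYNC:
		hw_enable_tagging_async();
		break;
	case KASAN_ARG_MODE_ASYMM:
		/*
		 * mte_enable_kernel_asymm() transparently falls back to
		 * synchronous mode if the CPU lacks the ARM64_MTE_ASYMM
		 * capability.
		 */
		hw_enable_tagging_asymm();
		break;
	default:
		hw_enable_tagging_sync();
		break;
	}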

Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
---
arch/arm64/include/asm/memory.h    |  1 +
arch/arm64/include/asm/mte-kasan.h |  5 +++++
arch/arm64/kernel/mte.c            | 26 ++++++++++++++++++++++++++
3 files changed, 32 insertions(+)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f1745a843414..1b9a1e242612 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
+#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 22420e1f8c03..478b9bcf69ad 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
+void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void)
{
}

+static inline void mte_enable_kernel_asymm(void)
+{
+}
+
#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 9d314a3bad3b..ef5484ecb2da 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -137,6 +137,32 @@ void mte_enable_kernel_async(void)
	if (!system_uses_mte_async_mode())
		static_branch_enable(&mte_async_mode);
}
+
+void mte_enable_kernel_asymm(void)
+{
+	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
+		__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
+
+		/*
+		 * MTE asymm mode behaves as async mode for store
+		 * operations. The mode is set system-wide by the
+		 * first PE that executes this function.
+		 *
+		 * Note: if KASAN acquires runtime switching between
+		 * sync and async mode in the future, this strategy
+		 * needs to be reviewed.
+		 */
+		if (!system_uses_mte_async_mode())
+			static_branch_enable(&mte_async_mode);
+	} else {
+		/*
+		 * If the CPU does not support MTE asymmetric mode, the
+		 * kernel falls back to synchronous mode, which is the
+		 * default for kasan=on.
+		 */
+		mte_enable_kernel_sync();
+	}
+}
#endif

#ifdef CONFIG_KASAN_HW_TAGS
--
2.33.0