[PATCH 2/3] tso: aarch64: context-switch tso bit on thread switch

From: Zayd Qumsieh
Date: Wed Apr 10 2024 - 18:17:54 EST


Add support for context-switching the TSO bit on thread switch. This
allows the TSO bit to be set per thread, and prepares for future work
that will allow userspace to set the TSO bit of its own thread at
will.

Signed-off-by: Zayd Qumsieh <zayd_qumsieh@xxxxxxxxx>
---
arch/arm64/include/asm/processor.h | 4 ++++
arch/arm64/include/asm/tso.h | 1 +
arch/arm64/kernel/process.c | 9 +++++++++
arch/arm64/kernel/tso.c | 9 +++++++++
4 files changed, 23 insertions(+)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f77371232d8c..a247bee24c73 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -4,6 +4,7 @@
*
* Copyright (C) 1995-1999 Russell King
* Copyright (C) 2012 ARM Ltd.
+ * Copyright © 2024 Apple Inc. All rights reserved.
*/
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
@@ -184,6 +185,9 @@ struct thread_struct {
u64 sctlr_user;
u64 svcr;
u64 tpidr2_el0;
+#ifdef CONFIG_ARM64_TSO
+ bool tso;
+#endif
};

static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/tso.h b/arch/arm64/include/asm/tso.h
index d9e1a7602c44..405e9a5efdf5 100644
--- a/arch/arm64/include/asm/tso.h
+++ b/arch/arm64/include/asm/tso.h
@@ -12,6 +12,7 @@
#include <linux/types.h>

int modify_tso_enable(bool tso_enable);
+void tso_thread_switch(struct task_struct *next);

#endif /* CONFIG_ARM64_TSO */
#endif /* __ASM_TSO_H */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4ae31b7af6c3..3831c1a97f79 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -5,6 +5,7 @@
* Original Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Copyright (C) 2012 ARM Ltd.
+ * Copyright © 2024 Apple Inc. All rights reserved.
*/
#include <linux/compat.h>
#include <linux/efi.h>
@@ -55,6 +56,7 @@
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>
+#include <asm/tso.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -530,6 +532,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
ssbs_thread_switch(next);
erratum_1418040_thread_switch(next);
ptrauth_thread_switch_user(next);
+#ifdef CONFIG_ARM64_TSO
+ tso_thread_switch(next);
+#endif

/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -651,6 +656,10 @@ void arch_setup_new_exec(void)
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
PR_SPEC_ENABLE);
}
+
+#ifdef CONFIG_ARM64_TSO
+ modify_tso_enable(false);
+#endif
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/kernel/tso.c b/arch/arm64/kernel/tso.c
index b3964db7aa66..9a15d825943f 100644
--- a/arch/arm64/kernel/tso.c
+++ b/arch/arm64/kernel/tso.c
@@ -3,6 +3,7 @@
* Copyright © 2024 Apple Inc. All rights reserved.
*/

+#include <linux/sched.h>
#include <linux/types.h>

#include <asm/cputype.h>
@@ -49,4 +50,12 @@ int modify_tso_enable(bool tso_enable)
return 0;
}

+void tso_thread_switch(struct task_struct *next)
+{
+ if (tso_supported()) {
+ current->thread.tso = tso_enabled();
+ modify_tso_enable(next->thread.tso);
+ }
+}
+
#endif /* CONFIG_ARM64_TSO */
--
2.39.3 (Apple Git-146)