[patch V3 52/66] x86/fpu: Add PKRU storage outside of task XSAVE buffer
From: Thomas Gleixner
Date: Fri Jun 18 2021 - 11:24:11 EST
From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
PKRU is currently partly XSAVE-managed and partly not. It has space in the
task XSAVE buffer and is context-switched by XSAVE/XRSTOR. However, it is
switched more eagerly than the rest of the FPU state because PKRU may need
to be up-to-date for things like copy_to/from_user(), since PKRU affects
user-permission memory accesses, not just accesses from userspace itself.

This leaves PKRU in a very odd position. XSAVE brings very little value to
the table for how Linux uses PKRU except for signal-related XSTATE handling.
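
The reason for the eager switching is visible in how a protection-key check
works: every data access to a user-mode page is gated by two per-key bits in
PKRU, no matter whether the access comes from userspace or from the kernel
doing copy_to/from_user(). A minimal sketch of that check, modeled on the
shape of the kernel's __pkru_allows_read()/__pkru_allows_write() helpers
(the constants and names below are illustrative only, not the kernel's):

#include <linux/types.h>

/*
 * Illustrative sketch, not part of this patch. Each protection key owns
 * two PKRU bits: AD (access disable, even bit) and WD (write disable,
 * odd bit). A stale PKRU therefore changes the outcome of a kernel
 * access to user memory, e.g. inside copy_to/from_user().
 */
#define PKRU_AD_BIT		0x1u
#define PKRU_WD_BIT		0x2u
#define PKRU_BITS_PER_PKEY	2

static inline bool pkru_allows_read(u32 pkru, u16 pkey)
{
	return !(pkru & (PKRU_AD_BIT << (pkey * PKRU_BITS_PER_PKEY)));
}

static inline bool pkru_allows_write(u32 pkru, u16 pkey)
{
	/* Access-disable disables writes too, so check both bits. */
	return !(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) <<
			 (pkey * PKRU_BITS_PER_PKEY)));
}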
Prepare to move PKRU away from being XSAVE-managed. Allocate space in the
thread_struct for it and save/restore it in the context-switch path
separately from the XSAVE-managed features. task->thread.pkru is only
valid when the task is scheduled out. For the current task the
authoritative source is the hardware, i.e. it has to be retrieved via
rdpkru().
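
With this split, a consumer that wants a task's PKRU value has to pick the
right source. A minimal sketch of that rule (the helper name is made up for
illustration and is not part of this patch):

#include <linux/sched.h>		/* struct task_struct, current */
#include <asm/special_insns.h>		/* rdpkru() */

/*
 * Hypothetical helper, not part of this patch. thread.pkru is only
 * written when a task is switched out, so it is stale for the running
 * task; for 'current' the hardware register is the only valid source.
 */
static u32 task_pkru(struct task_struct *tsk)
{
	if (tsk == current)
		return rdpkru();	/* hardware is authoritative */

	return tsk->thread.pkru;	/* valid while scheduled out */
}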
Leave the XSAVE code in place for now to ensure bisectability.
Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
V3: Fix the fallout on !PKRU enabled systems in copy_thread() - Intel testing via Dave
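
The !PKRU fix relies on the PKRU helpers being safe to call unconditionally
in copy_thread(). A sketch of the assumed shape of read_pkru() from
asm/pkru.h (illustrative; pkru_get_init_value() similarly collapses to a
constant when protection keys are not configured):

#include <asm/cpufeature.h>		/* cpu_feature_enabled() */
#include <asm/special_insns.h>		/* rdpkru() */

/*
 * Assumed definition, not part of this patch. Without OSPKE there is no
 * PKRU register to read, so the helper falls back to 0 (all keys
 * permissive) and the !PKRU fallout is limited to seeding thread.pkru
 * with a sane value.
 */
static inline u32 read_pkru(void)
{
	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
		return rdpkru();

	return 0;
}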
---
arch/x86/include/asm/processor.h | 9 +++++++++
arch/x86/kernel/process.c | 7 +++++++
arch/x86/kernel/process_64.c | 25 +++++++++++++++++++++++++
3 files changed, 41 insertions(+)
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -518,6 +518,15 @@ struct thread_struct {
unsigned int sig_on_uaccess_err:1;
+ /*
+ * Protection Keys Register for Userspace. Loaded immediately on
+ * context switch. Store it in thread_struct to avoid a lookup in
+ * the task's FPU xstate buffer. This value is only valid when a
+ * task is scheduled out. For 'current' the authoritative source of
+ * PKRU is the hardware itself.
+ */
+ u32 pkru;
+
/* Floating point and extended processor state */
struct fpu fpu;
/*
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -156,11 +156,18 @@ int copy_thread(unsigned long clone_flag
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
+ p->thread.pkru = pkru_get_init_value();
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, sp, arg);
return 0;
}
+ /*
+ * Clone current's PKRU value from hardware. tsk->thread.pkru
+ * is only valid when scheduled out.
+ */
+ p->thread.pkru = read_pkru();
+
frame->bx = 0;
*childregs = *current_pt_regs();
childregs->ax = 0;
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -340,6 +340,29 @@ static __always_inline void load_seg_leg
}
}
+/*
+ * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
+ * is not XSTATE managed on context switch because that would require a
+ * lookup in the task's FPU xsave buffer and would require keeping it updated
+ * in various places.
+ */
+static __always_inline void x86_pkru_load(struct thread_struct *prev,
+ struct thread_struct *next)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
+ /* Stash the prev task's value: */
+ prev->pkru = rdpkru();
+
+ /*
+ * PKRU writes are slightly expensive. Avoid them when not
+ * strictly necessary:
+ */
+ if (prev->pkru != next->pkru)
+ wrpkru(next->pkru);
+}
+
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
struct thread_struct *next)
{
@@ -589,6 +612,8 @@ void compat_start_thread(struct pt_regs
x86_fsgsbase_load(prev, next);
+ x86_pkru_load(prev, next);
+
/*
* Switch the PDA and FPU contexts.
*/