[PATCH 7/9] x86/fpu: rename lazy restore functions to "register state valid"

From: riel
Date: Tue Oct 04 2016 - 20:34:54 EST


From: Rik van Riel <riel@xxxxxxxxxx>

Name the functions after the state they track, rather than the lazy
restore mechanism they currently enable. This should make it more
obvious when we use the fpregs_state_valid() function for something
else in the future.
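
In short:

	__cpu_disable_lazy_restore(cpu)  ->  __cpu_invalidate_fpregs_state(cpu)
	fpu_want_lazy_restore(fpu, cpu)  ->  fpregs_state_valid(fpu, cpu)

The open-coded "fpu->last_cpu = -1" invalidation also gets a helper of
its own, __fpu_invalidate_fpregs_state(fpu).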

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
---
 arch/x86/include/asm/fpu/internal.h | 26 ++++++++++++++++++++------
 arch/x86/kernel/fpu/core.c          |  4 ++--
 arch/x86/kernel/smpboot.c           |  2 +-
3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 499d6ed0e376..d2cfe16dd9fa 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -479,18 +479,32 @@ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size)
 DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
 /*
+ * The in-register FPU state for an FPU context on a CPU is assumed to be
+ * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
+ * matches the FPU.
+ *
+ * If the FPU register state is valid, the kernel can skip restoring the
+ * FPU state from memory.
+ *
+ * Any code that clobbers the FPU registers or updates the in-memory
+ * FPU state for a task MUST let the rest of the kernel know that the
+ * FPU registers are no longer valid for this task. Calling either of
+ * these two invalidate functions is enough; use whichever is convenient.
+ *
  * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
  * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
  */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+static inline void __cpu_invalidate_fpregs_state(unsigned int cpu)
 {
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 
-static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
+static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
+{
+	fpu->last_cpu = -1;
+}
+
+static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
 {
 	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
 }
@@ -588,7 +602,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 	} else {
 		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			if (fpu_want_lazy_restore(new_fpu, cpu))
+			if (fpregs_state_valid(new_fpu, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(&new_fpu->state);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 6a37d525bdbe..25a45ddfdbcf 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -336,7 +336,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)

 	if (fpu->fpstate_active) {
 		/* Invalidate any lazy state: */
-		fpu->last_cpu = -1;
+		__fpu_invalidate_fpregs_state(fpu);
 	} else {
 		fpstate_init(&fpu->state);
 		trace_x86_fpu_init_state(fpu);
@@ -379,7 +379,7 @@ void fpu__current_fpstate_write_begin(void)
 	 * ensures we will not be lazy and skip a XRSTOR in the
 	 * future.
 	 */
-	fpu->last_cpu = -1;
+	__fpu_invalidate_fpregs_state(fpu);
 }
 
 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a93621f5b0..ca4c4ca2f6af 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1111,7 +1111,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 		return err;
 
 	/* the FPU context is blank, nobody can own it */
-	__cpu_disable_lazy_restore(cpu);
+	__cpu_invalidate_fpregs_state(cpu);
 
 	common_cpu_up(cpu, tidle);

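For reviewers skimming the diff, the resulting contract in one place (a
condensed restatement of the hunks above, not an additional change):

	/* Either call marks the in-register state stale for the task: */
	__cpu_invalidate_fpregs_state(cpu);	/* fpu_fpregs_owner_ctx = NULL */
	__fpu_invalidate_fpregs_state(fpu);	/* fpu->last_cpu = -1 */

	/* ...after which the switch path stops trusting the registers: */
	if (fpregs_state_valid(new_fpu, cpu))
		fpu.preload = 0;		/* registers still valid, skip restore */
	else
		prefetch(&new_fpu->state);	/* state will be restored from memory */
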
--
2.7.4