[tip:x86/fpu] x86/fpu: Rename lazy restore functions to "register state valid"

From: tip-bot for Rik van Riel
Date: Fri Oct 07 2016 - 05:48:39 EST


Commit-ID:  25d83b531c1aa4fca5b4e24ed10f493268f162bc
Gitweb:     http://git.kernel.org/tip/25d83b531c1aa4fca5b4e24ed10f493268f162bc
Author:     Rik van Riel <riel@xxxxxxxxxx>
AuthorDate: Tue, 4 Oct 2016 20:34:36 -0400
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Fri, 7 Oct 2016 11:14:41 +0200

x86/fpu: Rename lazy restore functions to "register state valid"

Name the functions after the state they track, rather than the lazy
restore behaviour they currently enable. This should make it more
obvious when the fpregs_state_valid() check is used for something
else in the future.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
Reviewed-by: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Quentin Casasnovas <quentin.casasnovas@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: pbonzini@xxxxxxxxxx
Link: http://lkml.kernel.org/r/1475627678-20788-8-git-send-email-riel@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/x86/include/asm/fpu/internal.h | 26 ++++++++++++++++++++------
 arch/x86/kernel/fpu/core.c          |  4 ++--
 arch/x86/kernel/smpboot.c           |  2 +-
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 499d6ed..d2cfe16 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -479,18 +479,32 @@ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size)
 DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

 /*
+ * The in-register FPU state for an FPU context on a CPU is assumed to be
+ * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
+ * matches the FPU.
+ *
+ * If the FPU register state is valid, the kernel can skip restoring the
+ * FPU state from memory.
+ *
+ * Any code that clobbers the FPU registers or updates the in-memory
+ * FPU state for a task MUST let the rest of the kernel know that the
+ * FPU registers are no longer valid for this task. Calling either of
+ * these two invalidate functions is enough, use whichever is convenient.
+ *
  * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
  * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
  */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+static inline void __cpu_invalidate_fpregs_state(unsigned int cpu)
 {
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }

-static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
+static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
+{
+	fpu->last_cpu = -1;
+}
+
+static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
 {
 	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
 }
@@ -588,7 +602,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 	} else {
 		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			if (fpu_want_lazy_restore(new_fpu, cpu))
+			if (fpregs_state_valid(new_fpu, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(&new_fpu->state);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 6a37d52..25a45dd 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -336,7 +336,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)

 	if (fpu->fpstate_active) {
 		/* Invalidate any lazy state: */
-		fpu->last_cpu = -1;
+		__fpu_invalidate_fpregs_state(fpu);
 	} else {
 		fpstate_init(&fpu->state);
 		trace_x86_fpu_init_state(fpu);
@@ -379,7 +379,7 @@ void fpu__current_fpstate_write_begin(void)
 	 * ensures we will not be lazy and skip a XRSTOR in the
 	 * future.
 	 */
-	fpu->last_cpu = -1;
+	__fpu_invalidate_fpregs_state(fpu);
 }

 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a9362..ca4c4ca 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1111,7 +1111,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 		return err;

 	/* the FPU context is blank, nobody can own it */
-	__cpu_disable_lazy_restore(cpu);
+	__cpu_invalidate_fpregs_state(cpu);

 	common_cpu_up(cpu, tidle);
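
The comment block added to internal.h above spells out the invariant
these helpers maintain: a task's FPU registers on a CPU are trusted
only while fpu->last_cpu still names that CPU *and* the CPU's
fpu_fpregs_owner_ctx still points back at that struct fpu; breaking
either link forces the next switch to restore from memory. A
stand-alone sketch of that bookkeeping, using simplified placeholder
types rather than the real kernel definitions, looks roughly like
this:

/*
 * Minimal user-space model of the "register state valid" bookkeeping.
 * The struct and the per-CPU owner array are simplified placeholders,
 * not the kernel's real definitions.
 */
#include <stdio.h>

#define NR_CPUS 4

struct fpu {
	int last_cpu;	/* CPU whose registers hold this context, or -1 */
};

/* Which context's state each CPU's registers currently hold (NULL = none). */
static struct fpu *fpu_fpregs_owner_ctx[NR_CPUS];

/* Registers on @cpu are only trusted while both links still match. */
static int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == fpu_fpregs_owner_ctx[cpu] && (int)cpu == fpu->last_cpu;
}

/* Invalidate from the CPU side, e.g. when bringing a CPU up. */
static void __cpu_invalidate_fpregs_state(unsigned int cpu)
{
	fpu_fpregs_owner_ctx[cpu] = NULL;
}

/* Invalidate from the task side, e.g. after editing the in-memory state. */
static void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

int main(void)
{
	struct fpu task = { .last_cpu = 1 };

	fpu_fpregs_owner_ctx[1] = &task;
	printf("valid on CPU 1: %d\n", fpregs_state_valid(&task, 1)); /* 1 */

	/* In-memory state was edited; registers can no longer be trusted. */
	__fpu_invalidate_fpregs_state(&task);
	printf("valid on CPU 1: %d\n", fpregs_state_valid(&task, 1)); /* 0 */

	return 0;
}

Either invalidate path is enough on its own: the validity test requires
both links to match, which is why the new comment says to use whichever
helper is convenient.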