[tip:x86/pti] x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack

From: tip-bot for Joerg Roedel
Date: Thu Jul 19 2018 - 19:24:38 EST


Commit-ID: b92a165df17ee6e616e43107730f06bf6ecf5d8d
Gitweb: https://git.kernel.org/tip/b92a165df17ee6e616e43107730f06bf6ecf5d8d
Author: Joerg Roedel <jroedel@xxxxxxx>
AuthorDate: Wed, 18 Jul 2018 11:40:47 +0200
Committer: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitDate: Fri, 20 Jul 2018 01:11:38 +0200

x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack

It is possible that the kernel is entered from kernel-mode and on the
entry-stack. The most common way this happens is when an exception is
triggered while loading the user-space segment registers on the
kernel-to-userspace exit path.

The segment loading needs to be done after the entry-stack switch, because
the stack-switch needs kernel %fs for per_cpu access.

When this happens, make sure to leave the kernel with the entry-stack
again, so that the interrupted code-path runs on the right stack when
switching to the user-cr3.

Detect this condition on kernel-entry by checking CS.RPL and %esp, and if
it happens, copy over the complete content of the entry stack to the
task-stack. This needs to be done because once the exception handler is
entered, the task might be scheduled out or even migrated to a different
CPU, so the handler cannot rely on the entry-stack contents. Leave a marker
in the stack-frame to detect this condition on the exit path.

On the exit path the copy is reversed: copy all of the remaining task-stack
back to the entry-stack and switch to it.
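To make the sizing of the two copies concrete, here is a minimal C sketch of
the byte-count calculations, assuming the entry-stack is a power-of-two-sized,
naturally aligned region (which is what the MASK_entry_stack /
SIZEOF_entry_stack arithmetic in the patch relies on). ENTRY_STACK_SIZE, the
helper names and the addresses in main() are illustrative, not the kernel's
actual values.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ENTRY_STACK_SIZE  512u   /* illustrative; must be a power of two */
#define ENTRY_STACK_MASK  (~((uintptr_t)ENTRY_STACK_SIZE - 1))

/*
 * Entry path: bytes between %esp and the top of the entry-stack.
 * Mirrors "andl $(MASK_entry_stack), %ecx; addl $(SIZEOF_entry_stack),
 * %ecx; sub %esi, %ecx" in the patch.
 */
static size_t bytes_on_entry_stack(uintptr_t esp)
{
	uintptr_t top = (esp & ENTRY_STACK_MASK) + ENTRY_STACK_SIZE;

	return top - esp;
}

/*
 * Exit path: bytes between %esp and the top of the task-stack (cached in
 * TSS.sp1 on 32-bit). Mirrors "movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1),
 * %ecx; subl %esi, %ecx".
 */
static size_t bytes_on_task_stack(uintptr_t esp, uintptr_t task_stack_top)
{
	return task_stack_top - esp;
}

int main(void)
{
	/*
	 * Entry-stack base at 0x1000 (aligned to its size), %esp 40 bytes
	 * below the top, i.e. a 40-byte frame lives on it.
	 */
	uintptr_t entry_esp = 0x1000 + ENTRY_STACK_SIZE - 40;

	printf("entry path, bytes to copy: %zu\n", bytes_on_entry_stack(entry_esp));
	printf("exit path,  bytes to copy: %zu\n",
	       bytes_on_task_stack(0x5000 - 40, 0x5000));

	/*
	 * In both directions the destination frame is then allocated by
	 * subtracting this byte count from the destination stack top, the
	 * contents are copied (rep movsl), and only after the copy is %esp
	 * switched, so an NMI in between still sees a valid source stack.
	 */
	return 0;
}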

Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Tested-by: Pavel Machek <pavel@xxxxxx>
Cc: "H . Peter Anvin" <hpa@xxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Jiri Kosina <jkosina@xxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: David Laight <David.Laight@xxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: Eduardo Valentin <eduval@xxxxxxxxxx>
Cc: Greg KH <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: aliguori@xxxxxxxxxx
Cc: daniel.gruss@xxxxxxxxxxxxxx
Cc: hughd@xxxxxxxxxx
Cc: keescook@xxxxxxxxxx
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Waiman Long <llong@xxxxxxxxxx>
Cc: "David H . Gutteridge" <dhgutteridge@xxxxxxxxxxxx>
Cc: joro@xxxxxxxxxx
Link: https://lkml.kernel.org/r/1531906876-13451-11-git-send-email-joro@xxxxxxxxxx

---
arch/x86/entry/entry_32.S | 116 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 115 insertions(+), 1 deletion(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 763592596727..9d6eceba0461 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,9 @@
* copied there. So allocate the stack-frame on the task-stack and
* switch to it before we do any copying.
*/
+
+#define CS_FROM_ENTRY_STACK (1 << 31)
+
.macro SWITCH_TO_KERNEL_STACK

ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
@@ -316,6 +319,16 @@
/* Load top of task-stack into %edi */
movl TSS_entry2task_stack(%edi), %edi

+ /*
+ * Clear unused upper bits of the dword containing the word-sized CS
+ * slot in pt_regs in case hardware didn't clear it for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
+ /* Special case - entry from kernel mode via entry stack */
+ testl $SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lentry_from_kernel_\@
+
/* Bytes to copy */
movl $PTREGS_SIZE, %ecx

@@ -329,8 +342,8 @@
*/
addl $(4 * 4), %ecx

-.Lcopy_pt_regs_\@:
#endif
+.Lcopy_pt_regs_\@:

/* Allocate frame on task-stack */
subl %ecx, %edi
@@ -346,6 +359,56 @@
cld
rep movsl

+ jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+ /*
+ * This handles the case when we enter the kernel from
+ * kernel-mode and %esp points to the entry-stack. When this
+ * happens we need to switch to the task-stack to run C code,
+ * but switch back to the entry-stack again when we approach
+ * iret and return to the interrupted code-path. This usually
+ * happens when we hit an exception while restoring user-space
+ * segment registers on the way back to user-space.
+ *
+ * When we switch to the task-stack here, we can't trust the
+ * contents of the entry-stack anymore, as the exception handler
+ * might be scheduled out or moved to another CPU. Therefore we
+ * copy the complete entry-stack to the task-stack and set a
+ * marker in the iret-frame (bit 31 of the CS dword) to detect
+ * what we've done on the iret path.
+ *
+ * On the iret path we copy everything back and switch to the
+ * entry-stack, so that the interrupted kernel code-path
+ * continues on the same stack it was interrupted with.
+ *
+ * Be aware that an NMI can happen anytime in this code.
+ *
+ * %esi: Entry-Stack pointer (same as %esp)
+ * %edi: Top of the task stack
+ */
+
+ /* Calculate number of bytes on the entry stack in %ecx */
+ movl %esi, %ecx
+
+ /* %ecx to the top of entry-stack */
+ andl $(MASK_entry_stack), %ecx
+ addl $(SIZEOF_entry_stack), %ecx
+
+ /* Number of bytes on the entry stack to %ecx */
+ sub %esi, %ecx
+
+ /* Mark stackframe as coming from entry stack */
+ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+ /*
+ * %esi and %edi are unchanged, %ecx contains the number of
+ * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+ * the stack-frame on task-stack and copy everything over
+ */
+ jmp .Lcopy_pt_regs_\@
+
.Lend_\@:
.endm

@@ -403,6 +466,56 @@
.Lend_\@:
.endm

+/*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack.
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+ /*
+ * Test if we entered the kernel with the entry-stack. Most
+ * likely we did not, because this code only runs on the
+ * return-to-kernel path.
+ */
+ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Unlikely slow-path */
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+ /* Copy the remaining task-stack contents to entry-stack */
+ movl %esp, %esi
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+ /* Bytes on the task-stack to ecx */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+ subl %esi, %ecx
+
+ /* Allocate stack-frame on entry-stack */
+ subl %ecx, %edi
+
+ /*
+ * Save future stack-pointer, we must not switch until the
+ * copy is done, otherwise the NMI handler could destroy the
+ * contents of the task-stack we are about to copy.
+ */
+ movl %edi, %ebx
+
+ /* Do the copy */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /* Safe to switch to entry-stack now */
+ movl %ebx, %esp
+
+.Lend_\@:
+.endm
/*
* %eax: prev task
* %edx: next task
@@ -764,6 +877,7 @@ restore_all:

restore_all_kernel:
TRACE_IRQS_IRET
+ PARANOID_EXIT_TO_KERNEL_MODE
RESTORE_REGS 4
jmp .Lirq_return