Re: [PATCH] x86/mm: Remove broken vsyscall emulation code from the page fault code

From: Jiri Olsa
Date: Mon Apr 29 2024 - 09:52:47 EST


On Mon, Apr 29, 2024 at 10:00:51AM +0200, Ingo Molnar wrote:

SNIP

> The attached patch looks like the ObviouslyCorrect(tm) thing to do.
>
> NOTE! This broken code goes back to this commit in 2011:
>
> 4fc3490114bb ("x86-64: Set siginfo and context on vsyscall emulation faults")
>
> ... and back then the reason was to get all the siginfo details right.
> Honestly, I do not for a moment believe that it's worth getting the siginfo
> details right here, but part of the commit says:
>
> This fixes issues with UML when vsyscall=emulate.
>
> ... and so my patch to remove this garbage will probably break UML in this
> situation.
>
> I do not believe that anybody should be running with vsyscall=emulate in
> 2024 in the first place, much less if you are doing things like UML. But
> let's see if somebody screams.
>
> Not-Yet-Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
> Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
> Link: https://lore.kernel.org/r/CAHk-=wh9D6f7HUkDgZHKmDCHUQmp+Co89GP+b8+z+G56BKeyNg@xxxxxxxxxxxxxx

fwiw I can no longer trigger the invalid wait context bug
with this change
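
in case anyone wants to poke at the emulated path by hand, a minimal
sketch along these lines calls the legacy gettimeofday vsyscall at its
fixed address (assuming an x86-64 kernel booted with vsyscall=emulate;
0xffffffffff600000 is the legacy vsyscall ABI address, and the
commented-out bad-pointer call is the case that lands in
write_ok_or_segv() and should SIGSEGV):

  #include <stdio.h>
  #include <sys/time.h>

  /* legacy vsyscall ABI: gettimeofday sits at the start of the fixed page */
  typedef int (*vgtod_t)(struct timeval *tv, struct timezone *tz);
  static const vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;

  int main(void)
  {
          struct timeval tv;

          /* good pointer: the kernel emulates the call and returns 0 */
          if (vgtod(&tv, NULL) == 0)
                  printf("vsyscall gettimeofday: %ld.%06ld\n",
                         (long)tv.tv_sec, (long)tv.tv_usec);

          /* bad pointer: should SIGSEGV via write_ok_or_segv() */
          /* vgtod((struct timeval *)1, NULL); */

          return 0;
  }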

Tested-by: Jiri Olsa <jolsa@xxxxxxxxxx>

jirka

> ---
>  arch/x86/entry/vsyscall/vsyscall_64.c | 25 ++-----------------------
>  arch/x86/include/asm/processor.h      |  1 -
>  arch/x86/mm/fault.c                   | 33 +--------------------------------
>  3 files changed, 3 insertions(+), 56 deletions(-)
>
> diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
> index a3c0df11d0e6..3b0f61b2ea6d 100644
> --- a/arch/x86/entry/vsyscall/vsyscall_64.c
> +++ b/arch/x86/entry/vsyscall/vsyscall_64.c
> @@ -98,11 +98,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
>
>  static bool write_ok_or_segv(unsigned long ptr, size_t size)
>  {
> -	/*
> -	 * XXX: if access_ok, get_user, and put_user handled
> -	 * sig_on_uaccess_err, this could go away.
> -	 */
> -
>  	if (!access_ok((void __user *)ptr, size)) {
>  		struct thread_struct *thread = &current->thread;
>
> @@ -123,7 +118,6 @@ bool emulate_vsyscall(unsigned long error_code,
>  	struct task_struct *tsk;
>  	unsigned long caller;
>  	int vsyscall_nr, syscall_nr, tmp;
> -	int prev_sig_on_uaccess_err;
>  	long ret;
>  	unsigned long orig_dx;
>
> @@ -234,12 +228,8 @@ bool emulate_vsyscall(unsigned long error_code,
>  		goto do_ret; /* skip requested */
>
>  	/*
> -	 * With a real vsyscall, page faults cause SIGSEGV. We want to
> -	 * preserve that behavior to make writing exploits harder.
> +	 * With a real vsyscall, page faults cause SIGSEGV.
>  	 */
> -	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
> -	current->thread.sig_on_uaccess_err = 1;
> -
>  	ret = -EFAULT;
>  	switch (vsyscall_nr) {
>  	case 0:
> @@ -262,23 +252,12 @@ bool emulate_vsyscall(unsigned long error_code,
>  		break;
>  	}
>
> -	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
> -
>  check_fault:
>  	if (ret == -EFAULT) {
>  		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
>  		warn_bad_vsyscall(KERN_INFO, regs,
>  				  "vsyscall fault (exploit attempt?)");
> -
> -		/*
> -		 * If we failed to generate a signal for any reason,
> -		 * generate one here. (This should be impossible.)
> -		 */
> -		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
> -				 !sigismember(&tsk->pending.signal, SIGSEGV)))
> -			goto sigsegv;
> -
> -		return true; /* Don't emulate the ret. */
> +		goto sigsegv;
>  	}
>
>  	regs->ax = ret;
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 811548f131f4..78e51b0d6433 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -472,7 +472,6 @@ struct thread_struct {
>  	unsigned long		iopl_emul;
>
>  	unsigned int		iopl_warn:1;
> -	unsigned int		sig_on_uaccess_err:1;
>
>  	/*
>  	 * Protection Keys Register for Userspace. Loaded immediately on
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 6b2ca8ba75b8..f26ecabc9424 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -724,39 +724,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
>  	WARN_ON_ONCE(user_mode(regs));
>
>  	/* Are we prepared to handle this kernel fault? */
> -	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
> -		/*
> -		 * Any interrupt that takes a fault gets the fixup. This makes
> -		 * the below recursive fault logic only apply to a faults from
> -		 * task context.
> -		 */
> -		if (in_interrupt())
> -			return;
> -
> -		/*
> -		 * Per the above we're !in_interrupt(), aka. task context.
> -		 *
> -		 * In this case we need to make sure we're not recursively
> -		 * faulting through the emulate_vsyscall() logic.
> -		 */
> -		if (current->thread.sig_on_uaccess_err && signal) {
> -			sanitize_error_code(address, &error_code);
> -
> -			set_signal_archinfo(address, error_code);
> -
> -			if (si_code == SEGV_PKUERR) {
> -				force_sig_pkuerr((void __user *)address, pkey);
> -			} else {
> -				/* XXX: hwpoison faults will set the wrong code. */
> -				force_sig_fault(signal, si_code, (void __user *)address);
> -			}
> -		}
> -
> -		/*
> -		 * Barring that, we can do the fixup and be happy.
> -		 */
> +	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
>  		return;
> -	}
>
>  	/*
>  	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
>