[PATCH AUTOSEL 4.19 1/4] x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h

From: Sasha Levin
Date: Thu Feb 29 2024 - 16:02:57 EST


From: Hou Tao <houtao1@xxxxxxxxxx>

[ Upstream commit ee0e39a63b78849f8abbef268b13e4838569f646 ]

Move is_vsyscall_vaddr() into asm/vsyscall.h to make it available for
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c.
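
For context only, the sketch below shows how the x86 implementation of
copy_from_kernel_nofault_allowed() in arch/x86/mm/maccess.c could use the
now-shared helper to reject vsyscall addresses. It is illustrative and not
part of this patch; the surrounding TASK_SIZE_MAX check is an assumption
about the existing 64-bit code, not something introduced here.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/uaccess.h>
#include <asm/vsyscall.h>

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	unsigned long vaddr = (unsigned long)unsafe_src;

	/*
	 * The vsyscall page sits above TASK_SIZE_MAX but is user-accessible;
	 * reading it via a nofault copy may still fault, so disallow it.
	 */
	if (is_vsyscall_vaddr(vaddr))
		return false;

	/* Assumed pre-existing x86-64 check: only true kernel addresses. */
	return vaddr >= TASK_SIZE_MAX;
}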

Reviewed-by: Sohil Mehta <sohil.mehta@xxxxxxxxx>
Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20240202103935.3154011-2-houtao@xxxxxxxxxxxxxxx
Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
arch/x86/include/asm/vsyscall.h | 10 ++++++++++
1 file changed, 10 insertions(+)

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index b986b2ca688a0..8154b25cb975a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,6 +4,7 @@

#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
+#include <asm/page_types.h>

#ifdef CONFIG_X86_VSYSCALL_EMULATION
extern void map_vsyscall(void);
@@ -22,4 +23,13 @@ static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
}
#endif

+/*
+ * The (legacy) vsyscall page is the only page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
#endif /* _ASM_X86_VSYSCALL_H */
--
2.43.0