[PATCH v7 5/6] x86/tdx: Move MMIO helpers to common library

From: Alexey Gladkov
Date: Fri Sep 13 2024 - 13:07:30 EST


From: "Alexey Gladkov (Intel)" <legion@xxxxxxxxxx>

The AMD SEV code has helpers that are used to emulate MOVS instructions.
To be able to reuse this code in the MOVS implementation for Intel TDX,
move them to a common location.
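
For illustration only (not part of this patch), here is a minimal sketch of
how a MOVS emulation step could drive the relocated helpers. The function
name emulate_movs_step() and the bounce buffer are hypothetical:

static int emulate_movs_step(char *src, char *dst, size_t size)
{
	char buf[8];
	int ret;

	/* The helpers only accept 1-, 2-, 4- or 8-byte accesses. */
	if (WARN_ON_ONCE(size > sizeof(buf)))
		return -EINVAL;

	/* Read side: -EFAULT on a faulting access, -EIO on an unsupported size. */
	ret = __get_iomem(src, buf, size);
	if (ret)
		return ret;

	/* Write side: same error convention as the read side. */
	return __put_iomem(dst, buf, size);
}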

Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Signed-off-by: Alexey Gladkov (Intel) <legion@xxxxxxxxxx>
---
arch/x86/coco/sev/core.c | 139 ++++++--------------------------------
arch/x86/include/asm/io.h | 3 +
arch/x86/lib/iomem.c | 115 +++++++++++++++++++++++++++++++
3 files changed, 140 insertions(+), 117 deletions(-)

diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 082d61d85dfc..07e9a6f15fba 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -369,72 +369,24 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
char *dst, char *buf, size_t size)
{
- unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+ unsigned long error_code;
+ int ret;

/*
- * This function uses __put_user() independent of whether kernel or user
- * memory is accessed. This works fine because __put_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __put_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_to_user() here because
- * vc_write_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
+ * This function runs in atomic context, so __put_iomem() is not allowed
+ * to sleep. The page-fault handler detects that it is running in atomic
+ * context and will not try to take mmap_lock and handle the fault, so
+ * additional pagefault_enable()/disable() calls are not needed.
*/
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *target = (u8 __user *)dst;
-
- memcpy(&d1, buf, 1);
- if (__put_user(d1, target))
- goto fault;
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *target = (u16 __user *)dst;
-
- memcpy(&d2, buf, 2);
- if (__put_user(d2, target))
- goto fault;
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *target = (u32 __user *)dst;
-
- memcpy(&d4, buf, 4);
- if (__put_user(d4, target))
- goto fault;
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *target = (u64 __user *)dst;
+ ret = __put_iomem(dst, buf, size);
+ if (!ret)
+ return ES_OK;

- memcpy(&d8, buf, 8);
- if (__put_user(d8, target))
- goto fault;
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ if (ret == -EIO)
return ES_UNSUPPORTED;
- }

- return ES_OK;
+ error_code = X86_PF_PROT | X86_PF_WRITE;

-fault:
if (user_mode(ctxt->regs))
error_code |= X86_PF_USER;

@@ -448,71 +400,24 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
char *src, char *buf, size_t size)
{
- unsigned long error_code = X86_PF_PROT;
+ unsigned long error_code;
+ int ret;

/*
- * This function uses __get_user() independent of whether kernel or user
- * memory is accessed. This works fine because __get_user() does no
- * sanity checks of the pointer being accessed. All that it does is
- * to report when the access failed.
- *
- * Also, this function runs in atomic context, so __get_user() is not
- * allowed to sleep. The page-fault handler detects that it is running
- * in atomic context and will not try to take mmap_sem and handle the
- * fault, so additional pagefault_enable()/disable() calls are not
- * needed.
- *
- * The access can't be done via copy_from_user() here because
- * vc_read_mem() must not use string instructions to access unsafe
- * memory. The reason is that MOVS is emulated by the #VC handler by
- * splitting the move up into a read and a write and taking a nested #VC
- * exception on whatever of them is the MMIO access. Using string
- * instructions here would cause infinite nesting.
+ * This function runs in atomic context, so __get_iomem() is not allowed
+ * to sleep. The page-fault handler detects that it is running in atomic
+ * context and will not try to take mmap_lock and handle the fault, so
+ * additional pagefault_enable()/disable() calls are not needed.
*/
- switch (size) {
- case 1: {
- u8 d1;
- u8 __user *s = (u8 __user *)src;
-
- if (__get_user(d1, s))
- goto fault;
- memcpy(buf, &d1, 1);
- break;
- }
- case 2: {
- u16 d2;
- u16 __user *s = (u16 __user *)src;
-
- if (__get_user(d2, s))
- goto fault;
- memcpy(buf, &d2, 2);
- break;
- }
- case 4: {
- u32 d4;
- u32 __user *s = (u32 __user *)src;
+ ret = __get_iomem(src, buf, size);
+ if (!ret)
+ return ES_OK;

- if (__get_user(d4, s))
- goto fault;
- memcpy(buf, &d4, 4);
- break;
- }
- case 8: {
- u64 d8;
- u64 __user *s = (u64 __user *)src;
- if (__get_user(d8, s))
- goto fault;
- memcpy(buf, &d8, 8);
- break;
- }
- default:
- WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ if (ret == -EIO)
return ES_UNSUPPORTED;
- }

- return ES_OK;
+ error_code = X86_PF_PROT;

-fault:
if (user_mode(ctxt->regs))
error_code |= X86_PF_USER;

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1d60427379c9..ac01d53466cb 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -402,4 +402,7 @@ static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
}
}

+int __get_iomem(char *src, char *buf, size_t size);
+int __put_iomem(char *dst, char *buf, size_t size);
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 5eecb45d05d5..3ab146edddea 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,6 +2,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kmsan-checks.h>
+#include <asm/uaccess.h>

#define movs(type,to,from) \
asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
@@ -124,3 +125,117 @@ void memset_io(volatile void __iomem *a, int b, size_t c)
}
}
EXPORT_SYMBOL(memset_io);
+
+int __get_iomem(char *src, char *buf, size_t size)
+{
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * __get_iomem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the SEV and TDX
+ * exception handlers by splitting the move up into separate read and
+ * write operations and taking a nested exception on whichever of them
+ * is the MMIO access. Using string instructions here would cause
+ * infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1, __user *s = (u8 __user *)src;
+
+ if (__get_user(d1, s))
+ return -EFAULT;
+ memcpy(buf, &d1, 1);
+ break;
+ }
+ case 2: {
+ u16 d2, __user *s = (u16 __user *)src;
+
+ if (__get_user(d2, s))
+ return -EFAULT;
+ memcpy(buf, &d2, 2);
+ break;
+ }
+ case 4: {
+ u32 d4, __user *s = (u32 __user *)src;
+
+ if (__get_user(d4, s))
+ return -EFAULT;
+ memcpy(buf, &d4, 4);
+ break;
+ }
+ case 8: {
+ u64 d8, __user *s = (u64 __user *)src;
+
+ if (__get_user(d8, s))
+ return -EFAULT;
+ memcpy(buf, &d8, 8);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int __put_iomem(char *dst, char *buf, size_t size)
+{
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * __put_iomem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the SEV and TDX
+ * exception handlers by splitting the move up into separate read and
+ * write operations and taking a nested exception on whichever of them
+ * is the MMIO access. Using string instructions here would cause
+ * infinite nesting.
+ */
+ switch (size) {
+ case 1: {
+ u8 d1, __user *target = (u8 __user *)dst;
+
+ memcpy(&d1, buf, 1);
+ if (__put_user(d1, target))
+ return -EFAULT;
+ break;
+ }
+ case 2: {
+ u16 d2, __user *target = (u16 __user *)dst;
+
+ memcpy(&d2, buf, 2);
+ if (__put_user(d2, target))
+ return -EFAULT;
+ break;
+ }
+ case 4: {
+ u32 d4, __user *target = (u32 __user *)dst;
+
+ memcpy(&d4, buf, 4);
+ if (__put_user(d4, target))
+ return -EFAULT;
+ break;
+ }
+ case 8: {
+ u64 d8, __user *target = (u64 __user *)dst;
+
+ memcpy(&d8, buf, 8);
+ if (__put_user(d8, target))
+ return -EFAULT;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+ return -EIO;
+ }
+
+ return 0;
+}
--
2.46.0