[PATCH v1 5/7] x86/tdx: Make DMA pages shared

From: Kuppuswamy Sathyanarayanan
Date: Wed Jun 09 2021 - 17:56:02 EST


From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>

Just like MKTME, TDX reassigns bits of the physical address for
metadata. MKTME used several bits for an encryption KeyID. TDX
uses a single bit in guests to communicate whether a physical page
should be protected by TDX as private memory (bit set to 0) or
unprotected and shared with the VMM (bit set to 1).
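
To illustrate the encoding (a standalone sketch, not code from this
series: the real bit position is discovered at runtime via
tdg_shared_mask(), so bit 51 below is a made-up example):

	#include <stdint.h>
	#include <stdio.h>

	/* Made-up position; the guest learns the real one at runtime. */
	#define TDX_SHARED_BIT	(1ULL << 51)

	static uint64_t gpa_shared(uint64_t gpa)
	{
		return gpa | TDX_SHARED_BIT;	/* bit set: shared with VMM */
	}

	static uint64_t gpa_private(uint64_t gpa)
	{
		return gpa & ~TDX_SHARED_BIT;	/* bit clear: TDX private */
	}

	int main(void)
	{
		printf("shared:  %#llx\n", (unsigned long long)gpa_shared(0x1000));
		printf("private: %#llx\n", (unsigned long long)gpa_private(0x1000));
		return 0;
	}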

__set_memory_enc_dec() (renamed here to __set_memory_protect()) is now
aware of TDX and sets the Shared bit accordingly, following up with the
relevant TDX hypercall.
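
For example, a driver that needs a VMM-visible DMA buffer keeps using
the existing set_memory_decrypted()/set_memory_encrypted() interface.
Roughly (a sketch of typical usage with hypothetical helper names, not
code from this series):

	/* Hypothetical helper, sketch only: get one page the VMM can see. */
	static unsigned long alloc_shared_page(void)
	{
		unsigned long vaddr = __get_free_pages(GFP_KERNEL, 0);

		if (!vaddr)
			return 0;

		/* Clears _PAGE_ENC on AMD; sets Shared bit + hypercall on TDX */
		if (set_memory_decrypted(vaddr, 1)) {
			free_pages(vaddr, 0);
			return 0;
		}
		return vaddr;
	}

	static void free_shared_page(unsigned long vaddr)
	{
		/* Convert back to private before returning it to the allocator */
		set_memory_encrypted(vaddr, 1);
		free_pages(vaddr, 0);
	}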

Also, do TDACCEPTPAGE on every 4k page after mapping the GPA range when
converting memory to private. The 4k page size limit is due to a
restriction in the current TDX spec. If the GPA (range) was already
mapped as an active, private page, the host VMM may remove the private
page from the TD by following the "Removing TD Private Pages" sequence
in the Intel TDX-module specification [1] to safely block the
mapping(s), flush the TLB and cache, and remove the mapping(s).

BUG() if TDACCEPTPAGE fails (except in the "previously accepted page"
case), as the guest is completely hosed if it can't access memory.
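
Condensed, the private-conversion path added below amounts to the
following (same logic as in the diff, repeated here only for
readability):

	if (tdx_hypercall(TDVMCALL_MAP_GPA, gpa, PAGE_SIZE * numpages, 0, 0))
		return -EIO;			/* VMM refused the mapping */

	for (i = 0; i < numpages; i++) {
		ret = __tdx_module_call(TDACCEPTPAGE, gpa + i * PAGE_SIZE,
					0, 0, 0, NULL);
		/*
		 * Re-accepting an already-accepted page is harmless; any
		 * other failure leaves the guest unable to trust its own
		 * memory, hence BUG().
		 */
		BUG_ON(ret && ret != TDX_PAGE_ALREADY_ACCEPTED);
	}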

[1] https://software.intel.com/content/dam/develop/external/us/en/documents/tdx-module-1eas-v0.85.039.pdf

Tested-by: Kai Huang <kai.huang@xxxxxxxxxxxxxxx>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Reviewed-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Reviewed-by: Tony Luck <tony.luck@xxxxxxxxx>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/pgtable.h   |  1 +
 arch/x86/kernel/tdx.c            | 34 ++++++++++++++++++-----
 arch/x86/mm/mem_encrypt_common.c |  3 +++
 arch/x86/mm/pat/set_memory.c     | 46 +++++++++++++++++++++++++++-----
 4 files changed, 71 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5b77843dfa10..41c8d3ace070 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -24,6 +24,7 @@
 /* Make the page accesable by VMM for protected guests */
 #define pgprot_protected_guest(prot) __pgprot(pgprot_val(prot) |	\
					       tdg_shared_mask())
+#define pgprot_pg_shared_mask() __pgprot(tdg_shared_mask())
 
 #ifndef __ASSEMBLY__
 #include <asm/x86_init.h>
diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index 591643abae88..c90871a10443 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -16,10 +16,14 @@
 /* TDX Module call Leaf IDs */
 #define TDINFO			1
 #define TDGETVEINFO		3
+#define TDACCEPTPAGE		6
 
 /* TDX hypercall Leaf IDs */
 #define TDVMCALL_MAP_GPA	0x10001
 
+/* TDX Module call error codes */
+#define TDX_PAGE_ALREADY_ACCEPTED	0x8000000000000001
+
 #define VE_IS_IO_OUT(exit_qual)	(((exit_qual) & 8) ? 0 : 1)
 #define VE_GET_IO_SIZE(exit_qual)	(((exit_qual) & 7) + 1)
 #define VE_GET_PORT_NUM(exit_qual)	((exit_qual) >> 16)
@@ -124,25 +128,43 @@ static void tdg_get_info(void)
 	physical_mask &= ~tdg_shared_mask();
 }
 
+static void tdg_accept_page(phys_addr_t gpa)
+{
+	u64 ret;
+
+	ret = __tdx_module_call(TDACCEPTPAGE, gpa, 0, 0, 0, NULL);
+
+	BUG_ON(ret && ret != TDX_PAGE_ALREADY_ACCEPTED);
+}
+
 /*
  * Inform the VMM of the guest's intent for this physical page:
  * shared with the VMM or private to the guest. The VMM is
  * expected to change its mapping of the page in response.
- *
- * Note: shared->private conversions require further guest
- * action to accept the page.
  */
 int tdx_hcall_gpa_intent(phys_addr_t gpa, int numpages,
 			 enum tdx_map_type map_type)
 {
-	u64 ret;
+	int ret = 0;
+	int i;
 
 	if (map_type == TDX_MAP_SHARED)
 		gpa |= tdg_shared_mask();
 
-	ret = tdx_hypercall(TDVMCALL_MAP_GPA, gpa, PAGE_SIZE * numpages, 0, 0);
+	if (tdx_hypercall(TDVMCALL_MAP_GPA, gpa, PAGE_SIZE * numpages, 0, 0))
+		ret = -EIO;
 
-	return ret ? -EIO : 0;
+	if (ret || map_type == TDX_MAP_SHARED)
+		return ret;
+
+	/*
+	 * For the shared->private conversion, accept each page using the
+	 * TDACCEPTPAGE TDX module call.
+	 */
+	for (i = 0; i < numpages; i++)
+		tdg_accept_page(gpa + i * PAGE_SIZE);
+
+	return 0;
 }
 
 static __cpuidle void tdg_halt(void)
diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
index 4a9a4d5f36cd..8053b43298ff 100644
--- a/arch/x86/mm/mem_encrypt_common.c
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -16,5 +16,8 @@ bool force_dma_unencrypted(struct device *dev)
 	if (sev_active() || sme_active())
 		return amd_force_dma_unencrypted(dev);
 
+	if (prot_guest_has(PR_GUEST_MEM_ENCRYPT))
+		return true;
+
 	return false;
 }
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 156cd235659f..fa0f2de20617 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -29,6 +29,7 @@
 #include <asm/proto.h>
 #include <asm/memtype.h>
 #include <asm/set_memory.h>
+#include <asm/tdx.h>
 
 #include "../mm_internal.h"
 
@@ -1980,13 +1981,16 @@ int set_memory_global(unsigned long addr, int numpages)
 				    __pgprot(_PAGE_GLOBAL), 0);
 }
 
-static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+static int __set_memory_protect(unsigned long addr, int numpages, bool protect)
 {
+	pgprot_t mem_protected_bits, mem_plain_bits;
 	struct cpa_data cpa;
+	enum tdx_map_type map_type;
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!mem_encrypt_active() &&
+	    !prot_guest_has(PR_GUEST_MEM_ENCRYPT_ACTIVE))
 		return 0;
 
 	/* Should not be working on unaligned addresses */
@@ -1996,8 +2000,25 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	memset(&cpa, 0, sizeof(cpa));
 	cpa.vaddr = &addr;
 	cpa.numpages = numpages;
-	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
-	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+
+	if (prot_guest_has(PR_GUEST_SHARED_MAPPING_INIT)) {
+		mem_protected_bits = __pgprot(0);
+		mem_plain_bits = pgprot_pg_shared_mask();
+	} else {
+		mem_protected_bits = __pgprot(_PAGE_ENC);
+		mem_plain_bits = __pgprot(0);
+	}
+
+	if (protect) {
+		cpa.mask_set = mem_protected_bits;
+		cpa.mask_clr = mem_plain_bits;
+		map_type = TDX_MAP_PRIVATE;
+	} else {
+		cpa.mask_set = mem_plain_bits;
+		cpa.mask_clr = mem_protected_bits;
+		map_type = TDX_MAP_SHARED;
+	}
+
 	cpa.pgd = init_mm.pgd;
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -2006,8 +2027,16 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)

 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
+	 *
+	 * For TDX we need to flush caches on private->shared. VMM is
+	 * responsible for flushing on shared->private.
 	 */
-	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	if (is_tdx_guest()) {
+		if (map_type == TDX_MAP_SHARED)
+			cpa_flush(&cpa, 1);
+	} else {
+		cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	}
 
 	ret = __change_page_attr_set_clr(&cpa, 1);

@@ -2020,18 +2049,21 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
+	if (!ret && prot_guest_has(PR_GUEST_SHARED_MAPPING_INIT))
+		ret = tdx_hcall_gpa_intent(__pa(addr), numpages, map_type);
+
 	return ret;
 }
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
-	return __set_memory_enc_dec(addr, numpages, true);
+	return __set_memory_protect(addr, numpages, true);
 }
 EXPORT_SYMBOL_GPL(set_memory_encrypted);
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
-	return __set_memory_enc_dec(addr, numpages, false);
+	return __set_memory_protect(addr, numpages, false);
 }
 EXPORT_SYMBOL_GPL(set_memory_decrypted);

--
2.25.1