[RFC v2 31/32] x86/kvm: Use bounce buffers for TD guest

From: Kuppuswamy Sathyanarayanan
Date: Mon Apr 26 2021 - 14:05:01 EST


From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>

TDX does not allow DMA access to guest private memory. In order for
DMA to work properly in a TD guest, use SWIOTLB bounce buffers.

Move the AMD SEV initialization into common code and adapt it for TDX.
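
For reference, the combined effect of the hunks below is roughly the
following (a condensed sketch of this patch, not the literal code):

	/* arch/x86/kernel/tdx.c: route all DMA through bounce buffers */
	swiotlb_force = SWIOTLB_FORCE;

	/* arch/x86/mm/mem_encrypt_common.c: init path shared by SEV and TDX */
	void __init mem_encrypt_init(void)
	{
		if (!sme_me_mask && !is_tdx_guest())
			return;

		/* Mark the SWIOTLB buffers shared (decrypted) so the host can reach them */
		swiotlb_update_mem_attributes();

		/* SEV without SEV-ES still needs rep string I/O unrolled */
		if (sev_active() && !sev_es_active())
			static_branch_enable(&sev_enable_key);

		if (sme_me_mask)
			print_amd_mem_encrypt_feature_info();
	}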

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Reviewed-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@xxxxxxxxxxxxxxx>
---
arch/x86/include/asm/io.h | 3 +-
arch/x86/kernel/pci-swiotlb.c | 2 +-
arch/x86/kernel/tdx.c | 3 ++
arch/x86/mm/mem_encrypt.c | 45 ------------------------------
arch/x86/mm/mem_encrypt_common.c | 47 ++++++++++++++++++++++++++++++++
5 files changed, 53 insertions(+), 47 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3b30395ad..658d9c2c2a9a 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -257,10 +257,11 @@ static inline void slow_down_io(void)

#endif

+extern struct static_key_false sev_enable_key;
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
#include <linux/jump_label.h>

-extern struct static_key_false sev_enable_key;
static inline bool sev_key_active(void)
{
return static_branch_unlikely(&sev_enable_key);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index c2cfa5e7c152..020e13749758 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -49,7 +49,7 @@ int __init pci_swiotlb_detect_4gb(void)
* buffers are allocated and used for devices that do not support
* the addressing range required for the encryption mask.
*/
- if (sme_active())
+ if (sme_active() || is_tdx_guest())
swiotlb = 1;

return swiotlb;
diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index 44dd12c693d0..6b07e7b4a69c 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -8,6 +8,7 @@
#include <asm/vmx.h>
#include <asm/insn.h>
#include <linux/sched/signal.h> /* force_sig_fault() */
+#include <linux/swiotlb.h>

#include <linux/cpu.h>

@@ -470,6 +471,8 @@ void __init tdx_early_init(void)

legacy_pic = &null_legacy_pic;

+ swiotlb_force = SWIOTLB_FORCE;
+
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "tdg:cpu_hotplug",
NULL, tdg_cpu_offline_prepare);

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 6f713c6a32b2..761a98904aa2 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -409,48 +409,3 @@ void __init mem_encrypt_free_decrypted_mem(void)

free_init_pages("unused decrypted", vaddr, vaddr_end);
}
-
-static void print_mem_encrypt_feature_info(void)
-{
- pr_info("AMD Memory Encryption Features active:");
-
- /* Secure Memory Encryption */
- if (sme_active()) {
- /*
- * SME is mutually exclusive with any of the SEV
- * features below.
- */
- pr_cont(" SME\n");
- return;
- }
-
- /* Secure Encrypted Virtualization */
- if (sev_active())
- pr_cont(" SEV");
-
- /* Encrypted Register State */
- if (sev_es_active())
- pr_cont(" SEV-ES");
-
- pr_cont("\n");
-}
-
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void)
-{
- if (!sme_me_mask)
- return;
-
- /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
- swiotlb_update_mem_attributes();
-
- /*
- * With SEV, we need to unroll the rep string I/O instructions,
- * but SEV-ES supports them through the #VC handler.
- */
- if (sev_active() && !sev_es_active())
- static_branch_enable(&sev_enable_key);
-
- print_mem_encrypt_feature_info();
-}
-
diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
index b6d93b0c5dcf..625c15fa92f9 100644
--- a/arch/x86/mm/mem_encrypt_common.c
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
@@ -36,3 +37,49 @@ bool force_dma_unencrypted(struct device *dev)

return false;
}
+
+static void print_amd_mem_encrypt_feature_info(void)
+{
+ pr_info("AMD Memory Encryption Features active:");
+
+ /* Secure Memory Encryption */
+ if (sme_active()) {
+ /*
+ * SME is mutually exclusive with any of the SEV
+ * features below.
+ */
+ pr_cont(" SME\n");
+ return;
+ }
+
+ /* Secure Encrypted Virtualization */
+ if (sev_active())
+ pr_cont(" SEV");
+
+ /* Encrypted Register State */
+ if (sev_es_active())
+ pr_cont(" SEV-ES");
+
+ pr_cont("\n");
+}
+
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void)
+{
+ if (!sme_me_mask && !is_tdx_guest())
+ return;
+
+ /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+ swiotlb_update_mem_attributes();
+
+ /*
+ * With SEV, we need to unroll the rep string I/O instructions,
+ * but SEV-ES supports them through the #VC handler.
+ */
+ if (sev_active() && !sev_es_active())
+ static_branch_enable(&sev_enable_key);
+
+ /* sme_me_mask != 0 means SME or SEV */
+ if (sme_me_mask)
+ print_amd_mem_encrypt_feature_info();
+}
--
2.25.1