[PATCH] swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests.

From: Ashish Kalra
Date: Thu Dec 05 2019 - 19:41:17 EST


From: Ashish Kalra <ashish.kalra@xxxxxxx>

For SEV, all DMA to and from the guest has to use shared
(unencrypted) pages. SEV uses SWIOTLB to make this happen
without requiring changes to device drivers. However,
depending on the workload being run, the default 64MB of
SWIOTLB might not be enough and SWIOTLB may run out of
buffers to use for DMA, resulting in I/O errors.

Increase the default size of SWIOTLB for SEV guests using
a minimum value of 128MB and a maximum value of 512MB,
depending on the amount of provisioned guest memory.
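
For reference, the scaling works out as in the sketch below, which
mirrors the logic this patch adds to adjust_swiotlb_default_size();
the helper name here is used only for illustration:

	/* Illustrative sketch of the sizing policy added by this patch. */
	static unsigned long sev_swiotlb_default_size(unsigned long total_mem)
	{
		unsigned long size = 64UL << 20;	/* 64MB baseline */

		if (total_mem <= (1UL << 30))		/* guests up to 1G */
			size *= 2;			/* 128MB */
		else if (total_mem <= (4UL << 30))	/* guests up to 4G */
			size *= 4;			/* 256MB */
		else					/* larger guests */
			size *= 8;			/* 512MB */

		return size;
	}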

The SWIOTLB default size adjustment is added as an
architecture-specific interface/callback to allow
architectures, such as those supporting memory encryption,
to adjust/expand the SWIOTLB size for their use.
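
As an illustration only (not part of this patch), another
architecture that selects ARCH_HAS_ADJUST_SWIOTLB_DEFAULT would
supply its own adjust_swiotlb_default_size(); in the hypothetical
sketch below, arch_mem_encrypt_active() is a placeholder for that
architecture's memory encryption check:

	/* Hypothetical override for some other architecture. */
	unsigned long adjust_swiotlb_default_size(unsigned long default_size)
	{
		/* Only grow the bounce buffer when encryption forces its use. */
		if (arch_mem_encrypt_active() && default_size < (256UL << 20))
			default_size = 256UL << 20;

		return default_size;
	}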

Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
---
arch/x86/Kconfig | 1 +
arch/x86/mm/mem_encrypt.c | 36 ++++++++++++++++++++++++++++++++++++
include/linux/dma-direct.h | 10 ++++++++++
kernel/dma/Kconfig | 3 +++
kernel/dma/swiotlb.c | 14 ++++++++++++--
5 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6e1faa28c58..c48bddf4b5b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1533,6 +1533,7 @@ config AMD_MEM_ENCRYPT
select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ select ARCH_HAS_ADJUST_SWIOTLB_DEFAULT
---help---
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9268c12458c8..12e586b37a92 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -376,6 +376,42 @@ bool force_dma_unencrypted(struct device *dev)
return false;
}

+#define TOTAL_MEM_1G 0x40000000U
+#define TOTAL_MEM_4G 0x100000000U
+
+/*
+ * Override for SWIOTLB default size adjustment -
+ * ARCH_HAS_ADJUST_SWIOTLB_DEFAULT
+ */
+unsigned long adjust_swiotlb_default_size(unsigned long default_size)
+{
+ /*
+ * For SEV, all DMA has to occur via shared/unencrypted pages.
+ * SEV uses SWIOTLB to make this happen without changing device
+ * drivers. However, depending on the workload being run, the
+ * default 64MB of SWIOTLB may not be enough and SWIOTLB may
+ * run out of buffers to use for DMA, resulting in I/O errors.
+ * Increase the default size of SWIOTLB for SEV guests using
+ * a minimum value of 128MB and a maximum value of 512MB,
+ * depending on the amount of provisioned guest memory.
+ */
+ if (sev_active()) {
+ unsigned long total_mem = get_num_physpages() << PAGE_SHIFT;
+
+ if (total_mem <= TOTAL_MEM_1G)
+ default_size = default_size * 2;
+ else if (total_mem <= TOTAL_MEM_4G)
+ default_size = default_size * 4;
+ else
+ default_size = default_size * 8;
+
+ pr_info_once("SEV is active, SWIOTLB default size set to %luMB\n",
+ default_size >> 20);
+ }
+
+ return default_size;
+}
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_free_decrypted_mem(void)
{
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index adf993a3bd58..481943e08c94 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -41,6 +41,16 @@ static inline bool force_dma_unencrypted(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

+#ifdef CONFIG_ARCH_HAS_ADJUST_SWIOTLB_DEFAULT
+unsigned long adjust_swiotlb_default_size(unsigned long default_size);
+#else
+static inline unsigned long
+adjust_swiotlb_default_size(unsigned long default_size)
+{
+ return default_size;
+}
+#endif /* CONFIG_ARCH_HAS_ADJUST_SWIOTLB_DEFAULT */
+
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
* bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 73c5c2b8e824..9fd88f45f48f 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -57,6 +57,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool

+config ARCH_HAS_ADJUST_SWIOTLB_DEFAULT
+ bool
+
config DMA_NONCOHERENT_CACHE_SYNC
bool

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 673a2cdb2656..346838edf9e5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -155,11 +155,21 @@ void swiotlb_set_max_segment(unsigned int val)
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
+ unsigned long default_size = IO_TLB_DEFAULT_SIZE;
unsigned long size;

+ /*
+ * If the swiotlb size / number of slabs is not specified on the
+ * kernel command line, give architectures a chance to adjust the
+ * default swiotlb size; this may be required by architectures
+ * such as those supporting memory encryption.
+ */
+ if (!io_tlb_nslabs)
+ default_size = adjust_swiotlb_default_size(default_size);
+
size = io_tlb_nslabs << IO_TLB_SHIFT;

- return size ? size : (IO_TLB_DEFAULT_SIZE);
+ return size ? size : default_size;
}

void swiotlb_print_info(void)
@@ -245,7 +255,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
void __init
swiotlb_init(int verbose)
{
- size_t default_size = IO_TLB_DEFAULT_SIZE;
+ unsigned long default_size = swiotlb_size_or_default();
unsigned char *vstart;
unsigned long bytes;

--
2.17.1