[PATCH v6] swiotlb: Adjust SWIOTLB bounce buffer size for SEV guests.
From: Ashish Kalra
Date: Thu Nov 19 2020 - 16:42:48 EST
From: Ashish Kalra <ashish.kalra@xxxxxxx>
For SEV, all DMA to and from guest has to use shared (un-encrypted) pages.
SEV uses SWIOTLB to make this happen without requiring changes to device
drivers. However, depending on workload being run, the default 64MB of
SWIOTLB might not be enough and SWIOTLB may run out of buffers to use
for DMA, resulting in I/O errors and/or performance degradation for
high I/O workloads.
Increase the default size of SWIOTLB for SEV guests using a minimum
value of 128MB and a maximum value of 512MB, depending on the amount
of provisioned guest memory.
Using late_initcall() interface to invoke swiotlb_adjust() does not
work as the size adjustment needs to be done before mem_encrypt_init()
and reserve_crashkernel() which use the allocated SWIOTLB buffer size,
hence calling it explicitly from setup_arch().
The SWIOTLB default size adjustment is added as an architecture specific
interface/callback to allow architectures such as those supporting memory
encryption to adjust/expand SWIOTLB size for their use.
v5 fixed build errors and warnings as
Reported-by: kbuild test robot <lkp@xxxxxxxxx>
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
---
arch/x86/kernel/setup.c | 2 ++
arch/x86/mm/mem_encrypt.c | 32 ++++++++++++++++++++++++++++++++
include/linux/swiotlb.h | 6 ++++++
kernel/dma/swiotlb.c | 24 ++++++++++++++++++++++++
4 files changed, 64 insertions(+)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3511736fbc74..b073d58dd4a3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1166,6 +1166,8 @@ void __init setup_arch(char **cmdline_p)
if (boot_cpu_has(X86_FEATURE_GBPAGES))
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+ swiotlb_adjust();
+
/*
* Reserve memory for crash kernel after SRAT is parsed so that it
* won't consume hotpluggable memory.
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3f248f0d0e07..c79a0d761db5 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -490,6 +490,38 @@ static void print_mem_encrypt_feature_info(void)
}
/* Architecture __weak replacement functions */
+unsigned long __init arch_swiotlb_adjust(unsigned long iotlb_default_size)
+{
+ unsigned long size = 0;
+
+ /*
+ * For SEV, all DMA has to occur via shared/unencrypted pages.
+ * SEV uses SWIOTLB to make this happen without changing device
+ * drivers. However, depending on the workload being run, the
+ * default 64MB of SWIOTLB may not be enough & SWIOTLB may
+ * run out of buffers for DMA, resulting in I/O errors and/or
+ * performance degradation especially with high I/O workloads.
+ * Increase the default size of SWIOTLB for SEV guests using
+ * a minimum value of 128MB and a maximum value of 512MB,
+ * depending on amount of provisioned guest memory.
+ */
+ if (sev_active()) {
+ phys_addr_t total_mem = memblock_phys_mem_size();
+
+ if (total_mem <= SZ_1G)
+ size = max(iotlb_default_size, (unsigned long) SZ_128M);
+ else if (total_mem <= SZ_4G)
+ size = max(iotlb_default_size, (unsigned long) SZ_256M);
+ else
+ size = max(iotlb_default_size, (unsigned long) SZ_512M);
+
+ pr_info("SWIOTLB bounce buffer size adjusted to %luMB for SEV platform\n",
+ size >> 20);
+ }
+
+ return size;
+}
+
void __init mem_encrypt_init(void)
{
if (!sme_me_mask)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 046bb94bd4d6..46a693f76f1e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -33,6 +33,7 @@ extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
+unsigned long __init arch_swiotlb_adjust(unsigned long size);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern void __init swiotlb_update_mem_attributes(void);
@@ -80,6 +81,7 @@ void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
+void __init swiotlb_adjust(void);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -102,6 +104,10 @@ static inline bool is_swiotlb_active(void)
{
return false;
}
+
+static inline void swiotlb_adjust(void)
+{
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c19379fabd20..3be9a19ea0a5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -163,6 +163,30 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
+unsigned long __init __weak arch_swiotlb_adjust(unsigned long size)
+{
+ return 0;
+}
+
+void __init swiotlb_adjust(void)
+{
+ unsigned long size;
+
+ /*
+ * If swiotlb parameter has not been specified, give a chance to
+ * architectures such as those supporting memory encryption to
+ * adjust/expand SWIOTLB size for their use.
+ */
+ if (!io_tlb_nslabs) {
+ size = arch_swiotlb_adjust(IO_TLB_DEFAULT_SIZE);
+ if (size) {
+ size = ALIGN(size, 1 << IO_TLB_SHIFT);
+ io_tlb_nslabs = size >> IO_TLB_SHIFT;
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
+ }
+}
+
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
--
2.17.1