Re: [PATCH kernel 4/9] dma/swiotlb: Stop forcing SWIOTLB for TDISP devices

From: Aneesh Kumar K . V

Date: Mon Mar 02 2026 - 02:55:38 EST


Alexey Kardashevskiy <aik@xxxxxxx> writes:

> SWIOTLB is enforced when encrypted guest memory is detected
> in pci_swiotlb_detect() which is required for legacy devices.
>
> Skip SWIOTLB for TDISP devices.
>
> Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxx>
> ---
> include/linux/swiotlb.h | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index 3dae0f592063..119c25d639a7 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -173,6 +173,15 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
> {
> struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
>
> + /*
> + * CC_ATTR_GUEST_MEM_ENCRYPT enforces SWIOTLB_FORCE in
> + * swiotlb_init_remap() to allow legacy devices access arbitrary
> + * VM encrypted memory.
> + * Skip it for TDISP devices capable of DMA-ing the encrypted memory.
> + */
> + if (device_cc_accepted(dev))
> + return false;
> +
> return mem && mem->force_bounce;
> }
>


I’m wondering whether we need more than that. Perhaps we could start
with a simpler assumption: a TDISP-capable device will never require
SWIOTLB bouncing. That would significantly simplify the DMA allocation
path for T=1.

Without this assumption, we might need to implement a private
io_tlb_mem.

We should also avoid supporting TDISP mode on devices that require
things like a restricted-memory SWIOTLB pool.

Something like:

modified arch/arm64/mm/mem_encrypt.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
+#include <linux/device.h>

static const struct arm64_mem_crypt_ops *crypt_ops;

@@ -53,3 +54,12 @@ int set_memory_decrypted(unsigned long addr, int numpages)
return crypt_ops->decrypt(addr, numpages);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
+
+bool force_dma_unencrypted(struct device *dev)
+{
+ if (device_cc_accepted(dev))
+ return false;
+
+ return is_realm_world();
+}
+EXPORT_SYMBOL_GPL(force_dma_unencrypted);
modified include/linux/swiotlb.h
@@ -173,6 +173,11 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

+ if (device_cc_accepted(dev)) {
+ dev_warn_once(dev, "(TIO) Disable SWIOTLB");
+ return false;
+ }
+
return mem && mem->force_bounce;
}

@@ -287,6 +292,9 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
+ if (device_cc_accepted(dev))
+ return false;
+
return dev->dma_io_tlb_mem->for_alloc;
}
#else
modified kernel/dma/direct.c
@@ -159,6 +159,14 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
*/
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
+ /*
+ * Atomic pools are marked decrypted and are used if we require
+ * updating pfn mem encryption attributes or for DMA non-coherent
+ * device allocation. Neither is true for trusted devices.
+ */
+ if (device_cc_accepted(dev))
+ return false;
+
return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

modified kernel/dma/swiotlb.c
@@ -1643,6 +1643,9 @@ bool is_swiotlb_active(struct device *dev)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

+ if (device_cc_accepted(dev))
+ return false;
+
return mem && mem->nslabs;
}