[RFC PATCH v3 10/11] coco: arm64: dma: Update force_dma_unencrypted for accepted devices

From: Aneesh Kumar K.V (Arm)

Date: Thu Mar 12 2026 - 04:09:38 EST


This change updates the DMA behavior for accepted devices by assuming
they access only private memory. Currently, the DMA API does not provide
a mechanism for allocating shared memory that can be accessed by both
the secure realm and the non-secure host. Accepted devices are therefore
expected to operate entirely within the private memory space.

If future use cases require accepted devices to interact with shared
memory — for example, for host-device communication — we will need to
extend the DMA interface to support such allocation semantics. This
commit lays the groundwork for that by clearly defining the current
assumption and isolating the enforcement to force_dma_unencrypted.

Treat swiotlb and decrypted DMA pools as shared-memory paths and avoid them
for accepted devices by:
- returning false from is_swiotlb_for_alloc() for accepted devices
- returning false from is_swiotlb_active() for accepted devices
- bypassing dma-direct atomic pool usage for accepted devices

This is based on the current assumption that accepted devices operate on private
Realm memory only, and prevents accidental fallback to shared/decrypted DMA
backends.

Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Jonathan Cameron <Jonathan.Cameron@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Alexey Kardashevskiy <aik@xxxxxxx>
Cc: Samuel Ortiz <sameo@xxxxxxxxxxxx>
Cc: Xu Yilun <yilun.xu@xxxxxxxxxxxxxxx>
Cc: Suzuki K Poulose <Suzuki.Poulose@xxxxxxx>
Cc: Steven Price <steven.price@xxxxxxx>
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@xxxxxxxxxx>
---
arch/arm64/include/asm/mem_encrypt.h | 6 +-----
arch/arm64/mm/mem_encrypt.c | 10 ++++++++++
include/linux/swiotlb.h | 3 +++
kernel/dma/direct.c | 8 ++++++++
kernel/dma/swiotlb.c | 3 +++
5 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index 5541911eb028..ae0b0cac0900 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -15,17 +15,13 @@ int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);

int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
+bool force_dma_unencrypted(struct device *dev);

#define mem_decrypt_granule_size mem_decrypt_granule_size
size_t mem_decrypt_granule_size(void);

int realm_register_memory_enc_ops(void);

-static inline bool force_dma_unencrypted(struct device *dev)
-{
- return is_realm_world();
-}
-
/*
* For Arm CCA guests, canonical addresses are "encrypted", so no changes
* required for dma_addr_encrypted().
diff --git a/arch/arm64/mm/mem_encrypt.c b/arch/arm64/mm/mem_encrypt.c
index f5d64bc29c20..18dea5d879b8 100644
--- a/arch/arm64/mm/mem_encrypt.c
+++ b/arch/arm64/mm/mem_encrypt.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
+#include <linux/device.h>

static const struct arm64_mem_crypt_ops *crypt_ops;

@@ -67,3 +68,12 @@ size_t mem_decrypt_granule_size(void)
return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(mem_decrypt_granule_size);
+
+bool force_dma_unencrypted(struct device *dev)
+{
+ if (device_cc_accepted(dev))
+ return false;
+
+ return is_realm_world();
+}
+EXPORT_SYMBOL_GPL(force_dma_unencrypted);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 0efb9b8e5dd0..224dcec6a58f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -296,6 +296,9 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
+ if (device_cc_accepted(dev))
+ return false;
+
return dev->dma_io_tlb_mem->for_alloc;
}
#else
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 34eccd047e9b..a7a9984db342 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -158,6 +158,14 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
*/
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
+ /*
+ * Atomic pools are marked decrypted and are used when we must update
+ * pfn memory-encryption attributes or allocate for a non-coherent DMA
+ * device. Neither applies to a trusted (accepted) device.
+ */
+ if (device_cc_accepted(dev))
+ return false;
+
return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 309a8b398a7d..339147d1d42f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1634,6 +1634,9 @@ bool is_swiotlb_active(struct device *dev)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

+ if (device_cc_accepted(dev))
+ return false;
+
return mem && mem->nslabs;
}

--
2.43.0