[RFC PATCH 7/7] dma-direct: set decrypted flag for remapped DMA allocations
From: Aneesh Kumar K.V (Arm)
Date: Fri Apr 17 2026 - 05:04:27 EST
Devices that are DMA non-coherent and require a remap were skipping
dma_set_decrypted(), leaving DMA buffers encrypted even when the device
requires unencrypted access. Move the call after the if (remap) branch
so that both the direct and remapped allocation paths correctly mark the
allocation as decrypted (or fail cleanly) before use.
Architectures such as arm64 cannot mark vmap addresses as decrypted, and
highmem pages necessarily require a vmap remap. As a result, such
allocations cannot be safely used for unencrypted DMA. Therefore, when
an unencrypted DMA buffer is requested, avoid allocating high PFNs from
__dma_direct_alloc_pages().
Other architectures (e.g. x86) do not have this limitation. However, for
simplicity, apply the restriction on all architectures whenever the device
requires unencrypted DMA access, rather than making it architecture-specific.
Fixes: f3c962226dbe ("dma-direct: clean up the remapping checks in dma_direct_alloc")
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@xxxxxxxxxx>
---
kernel/dma/direct.c | 30 +++++++++++++++++++++++++++---
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 1d2c27bbf3de..bb2a32896a9e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -204,6 +204,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
{
bool remap = false, set_uncached = false;
bool mark_mem_decrypt = !!(attrs & DMA_ATTR_CC_DECRYPTED);
+ bool allow_highmem = true;
struct page *page;
void *ret;
@@ -212,6 +213,15 @@ void *dma_direct_alloc(struct device *dev, size_t size,
mark_mem_decrypt = true;
}
+ if (attrs & DMA_ATTR_CC_DECRYPTED)
+ /*
+ * Unencrypted/shared DMA requires a linear-mapped buffer
+ * address to look up the PFN and set architecture-required PFN
+ * attributes. This is not possible with HighMem. Avoid HighMem
+ * allocation.
+ */
+ allow_highmem = false;
+
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
@@ -270,7 +280,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
}
/* we always manually zero the memory once we are done */
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
+ page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, allow_highmem);
if (!page)
return NULL;
@@ -298,7 +308,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
goto out_free_pages;
} else {
ret = page_address(page);
- if (mark_mem_decrypt && dma_set_decrypted(dev, ret, size))
+ }
+
+ if (mark_mem_decrypt) {
+ void *lm_addr;
+
+ lm_addr = page_address(page);
+ if (set_memory_decrypted((unsigned long)lm_addr, PFN_UP(size)))
goto out_leak_pages;
}
@@ -374,8 +390,16 @@ void dma_direct_free(struct device *dev, size_t size,
} else {
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
arch_dma_clear_uncached(cpu_addr, size);
- if (mark_mem_encrypted && dma_set_encrypted(dev, cpu_addr, size))
+ }
+
+ if (mark_mem_encrypted) {
+ void *lm_addr;
+
+ lm_addr = phys_to_virt(dma_to_phys(dev, dma_addr));
+ if (set_memory_encrypted((unsigned long)lm_addr, PFN_UP(size))) {
+ pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
return;
+ }
}
__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
--
2.43.0