[RFC PATCH 2/7] dma-direct: use DMA_ATTR_CC_DECRYPTED in alloc/free paths

From: Aneesh Kumar K.V (Arm)

Date: Fri Apr 17 2026 - 05:05:02 EST


Propagate force_dma_unencrypted() into DMA_ATTR_CC_DECRYPTED at the top of
the dma-direct allocation and free paths, and use that attribute — rather
than repeated force_dma_unencrypted() calls — to drive the encryption-related
decisions.

This updates dma_direct_alloc(), dma_direct_free(), and
dma_direct_alloc_pages() to fold the forced unencrypted case into attrs.

Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@xxxxxxxxxx>
---
kernel/dma/direct.c | 34 ++++++++++++++++++++++++++--------
1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index c2a43e4ef902..3932033f4d8c 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -201,16 +201,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
bool remap = false, set_uncached = false;
- bool mark_mem_decrypt = true;
+ bool mark_mem_decrypt = !!(attrs & DMA_ATTR_CC_DECRYPTED);
struct page *page;
void *ret;

+ if (force_dma_unencrypted(dev)) {
+ attrs |= DMA_ATTR_CC_DECRYPTED;
+ mark_mem_decrypt = true;
+ }
+
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;

- if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
- !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+ if (((attrs & (DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_CC_DECRYPTED)) ==
+ DMA_ATTR_NO_KERNEL_MAPPING) && !is_swiotlb_for_alloc(dev))
return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

if (!dev_is_dma_coherent(dev)) {
@@ -244,7 +249,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
* Remapping or decrypting memory may block, allocate the memory from
* the atomic pools instead if we aren't allowed block.
*/
- if ((remap || force_dma_unencrypted(dev)) &&
+ if ((remap || (attrs & DMA_ATTR_CC_DECRYPTED)) &&
dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

@@ -318,11 +323,20 @@ void *dma_direct_alloc(struct device *dev, size_t size,
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
- bool mark_mem_encrypted = true;
+ bool mark_mem_encrypted = !!(attrs & DMA_ATTR_CC_DECRYPTED);
unsigned int page_order = get_order(size);

- if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
- !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
+ /*
+ * If the platform forces unencrypted DMA for this device, fold that
+ * into attrs so the buffer is converted back to encrypted on free.
+ */
+ if (force_dma_unencrypted(dev)) {
+ attrs |= DMA_ATTR_CC_DECRYPTED;
+ mark_mem_encrypted = true;
+ }
+
+ if (((attrs & (DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_CC_DECRYPTED)) ==
+ DMA_ATTR_NO_KERNEL_MAPPING) && !is_swiotlb_for_alloc(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
dma_free_contiguous(dev, cpu_addr, size);
return;
@@ -365,10 +379,14 @@ void dma_direct_free(struct device *dev, size_t size,
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
+ unsigned long attrs = 0;
struct page *page;
void *ret;

- if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
+ if (force_dma_unencrypted(dev))
+ attrs |= DMA_ATTR_CC_DECRYPTED;
+
+ if ((attrs & DMA_ATTR_CC_DECRYPTED) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

if (is_swiotlb_for_alloc(dev)) {
--
2.43.0