[RFC PATCH v3 1/5] swiotlb: Return state of memory from swiotlb_alloc()
From: Mostafa Saleh
Date: Wed Apr 08 2026 - 15:48:02 EST
Make swiotlb_alloc() return the state of the allocated memory. At
the moment all the pools are decrypted, but that will change soon.
In the next patches dma-direct will use the returned state to
determine whether to decrypt the memory and use the proper memory
decryption/encryption related functions.
Also, add swiotlb_is_decrypted(), which will be used before calling
swiotlb_free() to check whether the memory needs to be encrypted
by the caller.
Signed-off-by: Mostafa Saleh <smostafa@xxxxxxxxxx>
---
include/linux/swiotlb.h | 25 +++++++++++++++++++++++--
kernel/dma/direct.c | 2 +-
kernel/dma/swiotlb.c | 23 ++++++++++++++++++++++-
3 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 3dae0f592063..24be65494ce8 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -63,6 +63,7 @@ extern void __init swiotlb_update_mem_attributes(void);
* @area_nslabs: Number of slots in each area.
* @areas: Array of memory area descriptors.
* @slots: Array of slot descriptors.
+ * @decrypted: Whether the pool was decrypted or left in default state.
* @node: Member of the IO TLB memory pool list.
* @rcu: RCU head for swiotlb_dyn_free().
* @transient: %true if transient memory pool.
@@ -77,6 +78,7 @@ struct io_tlb_pool {
unsigned int area_nslabs;
struct io_tlb_area *areas;
struct io_tlb_slot *slots;
+ bool decrypted;
#ifdef CONFIG_SWIOTLB_DYNAMIC
struct list_head node;
struct rcu_head rcu;
@@ -281,16 +283,31 @@ static inline void swiotlb_sync_single_for_cpu(struct device *dev,
extern void swiotlb_print_info(void);
+/*
+ * This contains the state of pages returned by swiotlb_alloc()
+ * A page can either be:
+ * SWIOTLB_PAGE_DEFAULT: The page was not decrypted by the pool.
+ * SWIOTLB_PAGE_DECRYPTED: The page was decrypted by the pool.
+ */
+enum swiotlb_page_state {
+ SWIOTLB_PAGE_DEFAULT,
+ SWIOTLB_PAGE_DECRYPTED,
+};
+
#ifdef CONFIG_DMA_RESTRICTED_POOL
-struct page *swiotlb_alloc(struct device *dev, size_t size);
+struct page *swiotlb_alloc(struct device *dev, size_t size,
+ enum swiotlb_page_state *state);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+bool swiotlb_is_decrypted(struct device *dev, struct page *page, size_t size);
+
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
return dev->dma_io_tlb_mem->for_alloc;
}
#else
-static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
+static inline struct page *swiotlb_alloc(struct device *dev, size_t size,
+ enum swiotlb_page_state *state)
{
return NULL;
}
@@ -299,6 +316,10 @@ static inline bool swiotlb_free(struct device *dev, struct page *page,
{
return false;
}
+static inline bool swiotlb_is_decrypted(struct device *dev, struct page *page, size_t size)
+{
+ return false;
+}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
return false;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8f43a930716d..6efb5973fbd3 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -106,7 +106,7 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
- struct page *page = swiotlb_alloc(dev, size);
+ struct page *page = swiotlb_alloc(dev, size, NULL);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
swiotlb_free(dev, page, size);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9fd73700ddcf..8468ee5d3ff2 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1763,7 +1763,8 @@ static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
#ifdef CONFIG_DMA_RESTRICTED_POOL
-struct page *swiotlb_alloc(struct device *dev, size_t size)
+struct page *swiotlb_alloc(struct device *dev, size_t size,
+ enum swiotlb_page_state *state)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
@@ -1787,6 +1788,8 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
return NULL;
}
+ if (state)
+ *state = pool->decrypted ? SWIOTLB_PAGE_DECRYPTED : SWIOTLB_PAGE_DEFAULT;
return pfn_to_page(PFN_DOWN(tlb_addr));
}
@@ -1804,6 +1807,18 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
return true;
}
+bool swiotlb_is_decrypted(struct device *dev, struct page *page, size_t size)
+{
+ phys_addr_t tlb_addr = page_to_phys(page);
+ struct io_tlb_pool *pool;
+
+ pool = swiotlb_find_pool(dev, tlb_addr);
+ if (!pool)
+ return false;
+
+ return pool->decrypted;
+}
+
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
struct device *dev)
{
@@ -1844,6 +1859,12 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
return -ENOMEM;
}
+ /*
+ * At the moment all restricted dma pools are always decrypted,
+ * although that should change soon with CCA solutions introducing
+ * device passthrough.
+ */
+ pool->decrypted = true;
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
rmem->size >> PAGE_SHIFT);
swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
--
2.53.0.1213.gd9a14994de-goog