[PATCH 5.15 377/667] dma-direct: factor out dma_set_{de,en}crypted helpers

From: Greg Kroah-Hartman
Date: Tue Jun 07 2022 - 15:22:12 EST


From: Christoph Hellwig <hch@xxxxxx>

[ Upstream commit 4d0564785bb03841e4b5c5b31aa4ecd1eb0d01bb ]

Factor out helpers that make dealing with memory encryption a little
less cumbersome.
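
As the conversion hunks below show, each caller previously had to
open-code the force_dma_unencrypted() check and the page-count
calculation, e.g.:

	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

which the helpers collapse into a single call:

	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;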

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Robin Murphy <robin.murphy@xxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
kernel/dma/direct.c | 56 ++++++++++++++++++++-------------------------
1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a1573ed2ea18..b9513fd68239 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
@@ -175,7 +189,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -228,12 +241,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -250,13 +259,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -271,13 +275,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -316,8 +316,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -343,11 +342,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-					 1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -368,9 +364,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }

--
2.35.1