Re: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
From: David Rientjes
Date: Tue Apr 14 2020 - 15:31:15 EST
On Tue, 14 Apr 2020, Christoph Hellwig wrote:
> > +	/*
> > +	 * Unencrypted memory must come directly from DMA atomic pools if
> > +	 * blocking is not allowed.
> > +	 */
> > +	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> > +	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) {
> > +		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
> > +		if (!ret)
> > +			return NULL;
> > +		goto done;
> > +	}
> > +
> >  	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> >  	    dma_alloc_need_uncached(dev, attrs) &&
> >  	    !gfpflags_allow_blocking(gfp)) {
>
> Can we keep a single conditional for the pool allocations? Maybe
> add a new dma_alloc_from_pool helper à la:
>
> static inline bool dma_alloc_from_pool(struct device *dev, gfp_t gfp,
> 		unsigned long attrs)
> {
> 	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
> 		return false;
> 	if (gfpflags_allow_blocking(gfp))
> 		return false;
> 	if (force_dma_unencrypted(dev))
> 		return true;
> 	if (dma_alloc_need_uncached(dev, attrs))
> 		return true;
> 	return false;
> }
Looks good, fixed. I renamed it to dma_should_alloc_from_pool() to avoid
confusing it with the actual allocation function and added a
dma_should_free_from_pool() as well.
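
The free side works because dma_free_from_pool() refuses addresses it does
not own, so it is safe to try the pool first on every free even when the
buffer actually came from the page allocator.  Roughly, assuming the single
static atomic_pool backing kernel/dma/remap.c (a simplified sketch, not the
literal code):

	#include <linux/genalloc.h>

	/*
	 * Sketch: the free only succeeds for addresses inside the pool's
	 * VA range; anything else falls through to the normal free path.
	 */
	bool dma_free_from_pool(struct device *dev, void *start, size_t size)
	{
		if (!atomic_pool ||
		    !gen_pool_has_addr(atomic_pool, (unsigned long)start, size))
			return false;
		gen_pool_free(atomic_pool, (unsigned long)start, size);
		return true;
	}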
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+					      unsigned long attrs)
+{
+	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return false;
+	if (gfpflags_allow_blocking(gfp))
+		return false;
+	if (force_dma_unencrypted(dev))
+		return true;
+	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return false;
+	if (dma_alloc_need_uncached(dev, attrs))
+		return true;
+	return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+					     unsigned long attrs)
+{
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return true;
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return true;
+	return false;
+}
+
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
@@ -124,9 +157,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs) &&
-	    !gfpflags_allow_blocking(gfp)) {
+	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
 		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
 		if (!ret)
 			return NULL;
@@ -202,6 +233,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, attrs) &&
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -209,10 +245,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
-		return;
-
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
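
For completeness, the sort of caller this protects is a driver doing a
coherent allocation from atomic context in an SEV guest, where
force_dma_unencrypted() is true.  A hypothetical fragment (illustrative
only, not from the series):

	/*
	 * GFP_ATOMIC forbids blocking, so set_memory_decrypted() cannot be
	 * called on this path; dma_should_alloc_from_pool() routes the
	 * request to the pre-decrypted atomic pool instead.
	 */
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_ATOMIC);
	if (!vaddr)
		return -ENOMEM;	/* atomic pool exhausted or undersized */
	/* ... use the buffer ... */
	dma_free_coherent(dev, SZ_4K, vaddr, dma_handle);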