[PATCH v3 4/4] dmaengine: dma-axi-dmac: Fix BUG() on vunmap()
From: Nuno Sá via B4 Relay
Date: Wed Apr 08 2026 - 08:43:39 EST
From: Nuno Sá <nuno.sa@xxxxxxxxxx>
For architectures like Microblaze or arm64 (where this IP is used),
DMA_DIRECT_REMAP is set which means that dma_alloc_coherent() might
remap (and hence vmalloc()) some memory. This became visible in a design
where dma_direct_use_pool() is not possible.
With the above, when calling dma_free_coherent(), vunmap() would be
called from softirq context, thus leading to a BUG().
To fix it, use a dma pool that is allocated in
.device_alloc_chan_resources() and allocate blocks from it. The key
point is that now dma_pool_free() is used in axi_dmac_free_desc() to
free the blocks and that just frees the blocks from the pool in the
sense they can be used again. In other words, no actual call to
dma_free_coherent() happens. That only happens when destroying the pool
in axi_dmac_free_chan_resources() which does not happen in any interrupt
context.
Signed-off-by: Nuno Sá <nuno.sa@xxxxxxxxxx>
---
drivers/dma/dma-axi-dmac.c | 66 ++++++++++++++++++++++++++++------------------
1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 41898d594be7..d47ff27e1408 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -147,6 +148,7 @@ struct axi_dmac_chan {
struct virt_dma_chan vchan;
struct axi_dmac_desc *next_desc;
+ void *pool;
struct list_head active_descs;
enum dma_transfer_direction direction;
@@ -648,11 +650,17 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+ for (unsigned int i = 0; i < desc->num_sgs; i++)
+ dma_pool_free(desc->chan->pool, desc->sg[i].hw, desc->sg[i].hw_phys);
+
+ kfree(desc);
+}
+
static struct axi_dmac_desc *
axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
- struct axi_dmac *dmac = chan_to_axi_dmac(chan);
- struct device *dev = dmac->dma_dev.dev;
struct axi_dmac_hw_desc *hws;
struct axi_dmac_desc *desc;
dma_addr_t hw_phys;
@@ -664,22 +672,22 @@ axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
desc->num_sgs = num_sgs;
desc->chan = chan;
- hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
- &hw_phys, GFP_ATOMIC);
- if (!hws) {
- kfree(desc);
- return NULL;
- }
-
for (i = 0; i < num_sgs; i++) {
- desc->sg[i].hw = &hws[i];
- desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+ hws = dma_pool_zalloc(chan->pool, GFP_NOWAIT, &hw_phys);
+ if (!hws) {
+ desc->num_sgs = i;
+ axi_dmac_free_desc(desc);
+ return NULL;
+ }
- hws[i].id = AXI_DMAC_SG_UNUSED;
- hws[i].flags = 0;
+ desc->sg[i].hw = hws;
+ desc->sg[i].hw_phys = hw_phys;
+
+ hws->id = AXI_DMAC_SG_UNUSED;
/* Link hardware descriptors */
- hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+ if (i)
+ desc->sg[i - 1].hw->next_sg_addr = hw_phys;
}
/* The last hardware descriptor will trigger an interrupt */
@@ -688,18 +696,6 @@ axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
return desc;
}
-static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
-{
- struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
- struct device *dev = dmac->dma_dev.dev;
- struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
- dma_addr_t hw_phys = desc->sg[0].hw_phys;
-
- dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
- hw, hw_phys);
- kfree(desc);
-}
-
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
enum dma_transfer_direction direction, dma_addr_t addr,
unsigned int num_periods, unsigned int period_len,
@@ -933,9 +929,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
+static int axi_dmac_alloc_chan_resources(struct dma_chan *c)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct device *dev = c->device->dev;
+
+ chan->pool = dma_pool_create(dev_name(dev), dev,
+ sizeof(struct axi_dmac_hw_desc),
+ __alignof__(struct axi_dmac_hw_desc), 0);
+ if (!chan->pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+
vchan_free_chan_resources(to_virt_chan(c));
+ dma_pool_destroy(chan->pool);
}
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
@@ -1238,6 +1251,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
+ dma_dev->device_alloc_chan_resources = axi_dmac_alloc_chan_resources;
dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
--
2.53.0