Re: [PATCH v2 3/4] dmaengine: dma-axi-dmac: fix use-after-free on unbind
From: Frank Li
Date: Tue Mar 31 2026 - 10:27:09 EST
On Tue, Mar 31, 2026 at 09:46:35AM +0100, Nuno Sá wrote:
> On Mon, Mar 30, 2026 at 11:22:10AM -0400, Frank Li wrote:
> > On Fri, Mar 27, 2026 at 04:58:40PM +0000, Nuno Sá wrote:
> > > The DMA device lifetime can extend beyond the platform driver unbind if
> > > DMA channels are still referenced by client drivers. This leads to
> > > use-after-free when the devm-managed memory is freed on unbind but the
> > > DMA device callbacks still access it.
> > >
> > > Fix this by:
> > > - Allocating axi_dmac with kzalloc_obj() instead of devm_kzalloc() so
> > > its lifetime is not tied to the platform device.
> > > - Implementing the device_release callback so that we can free
> > > the object when reference count gets to 0 (no users).
> > > - Adding an 'unbound' flag protected by the vchan lock that is set
> > > during driver removal, preventing MMIO accesses after the device has been
> > > unbound.
> > >
> > > While at it, explicitly include spinlock.h given it was missing.
> > >
> > > Signed-off-by: Nuno Sá <nuno.sa@xxxxxxxxxx>
> > > ---
> >
> > Not sure if it similar with
> > https://lore.kernel.org/dmaengine/20250903-v6-16-topic-sdma-v1-9-ac7bab629e8b@xxxxxxxxxxxxxx/
> >
> > It looks like a device link between consumer and provider is missing.
>
> Well, it's surely related. I mean, if we ensure the consumers are
> gone through devlinks and nothing is left behind, then this patch is basically unneeded.
>
> But, FWIW, my 2cents would also go into questioning if AUTOREMOVE is
> really what we want in every situation? Might be too harsh to assume that
> a DMA channel consumer is useless even if DMA is gone. Anyways, is there
> a v2 already? I would be interested in following this one...
This patch may have been missed by Vinod; I have asked Vinod to check again.
The open question is whether to link to the channel device or the DMA engine
device. I prefer linking to the channel devices, so it supports more advanced
runtime PM management.
Frank
>
> - Nuno Sá
>
> >
> > Frank
> >
> > > drivers/dma/dma-axi-dmac.c | 70 +++++++++++++++++++++++++++++++++++++++-------
> > > 1 file changed, 60 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
> > > index 127c3cf80a0e..70d3ad7e7d37 100644
> > > --- a/drivers/dma/dma-axi-dmac.c
> > > +++ b/drivers/dma/dma-axi-dmac.c
> > > @@ -24,6 +24,7 @@
> > > #include <linux/platform_device.h>
> > > #include <linux/regmap.h>
> > > #include <linux/slab.h>
> > > +#include <linux/spinlock.h>
> > >
> > > #include <dt-bindings/dma/axi-dmac.h>
> > >
> > > @@ -174,6 +175,8 @@ struct axi_dmac {
> > >
> > > struct dma_device dma_dev;
> > > struct axi_dmac_chan chan;
> > > +
> > > + bool unbound;
> > > };
> > >
> > > static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
> > > @@ -182,6 +185,11 @@ static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
> > > dma_dev);
> > > }
> > >
> > > +static struct axi_dmac *dev_to_axi_dmac(struct dma_device *dev)
> > > +{
> > > + return container_of(dev, struct axi_dmac, dma_dev);
> > > +}
> > > +
> > > static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
> > > {
> > > return container_of(c, struct axi_dmac_chan, vchan.chan);
> > > @@ -614,7 +622,12 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
> > > LIST_HEAD(head);
> > >
> > > spin_lock_irqsave(&chan->vchan.lock, flags);
> > > - axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
> > > + /*
> > > + * Only allow the MMIO access if the device is live. Otherwise still
> > > + * go for freeing the descriptors.
> > > + */
> > > + if (!dmac->unbound)
> > > + axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
> > > chan->next_desc = NULL;
> > > vchan_get_all_descriptors(&chan->vchan, &head);
> > > list_splice_tail_init(&chan->active_descs, &head);
> > > @@ -642,9 +655,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
> > > if (chan->hw_sg)
> > > ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
> > >
> > > - axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
> > > -
> > > spin_lock_irqsave(&chan->vchan.lock, flags);
> > > + if (dmac->unbound) {
> > > + spin_unlock_irqrestore(&chan->vchan.lock, flags);
> > > + return;
> > > + }
> > > + axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
> > > if (vchan_issue_pending(&chan->vchan))
> > > axi_dmac_start_transfer(chan);
> > > spin_unlock_irqrestore(&chan->vchan.lock, flags);
> > > @@ -1184,6 +1200,14 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
> > > return 0;
> > > }
> > >
> > > +static void axi_dmac_release(struct dma_device *dma_dev)
> > > +{
> > > + struct axi_dmac *dmac = dev_to_axi_dmac(dma_dev);
> > > +
> > > + put_device(dma_dev->dev);
> > > + kfree(dmac);
> > > +}
> > > +
> > > static void axi_dmac_tasklet_kill(void *task)
> > > {
> > > tasklet_kill(task);
> > > @@ -1194,16 +1218,27 @@ static void axi_dmac_free_dma_controller(void *of_node)
> > > of_dma_controller_free(of_node);
> > > }
> > >
> > > +static void axi_dmac_disable(void *__dmac)
> > > +{
> > > + struct axi_dmac *dmac = __dmac;
> > > + unsigned long flags;
> > > +
> > > + spin_lock_irqsave(&dmac->chan.vchan.lock, flags);
> > > + dmac->unbound = true;
> > > + spin_unlock_irqrestore(&dmac->chan.vchan.lock, flags);
> > > + axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
> > > +}
> > > +
> > > static int axi_dmac_probe(struct platform_device *pdev)
> > > {
> > > struct dma_device *dma_dev;
> > > - struct axi_dmac *dmac;
> > > + struct axi_dmac *__dmac;
> > > struct regmap *regmap;
> > > unsigned int version;
> > > u32 irq_mask = 0;
> > > int ret;
> > >
> > > - dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
> > > + struct axi_dmac *dmac __free(kfree) = kzalloc_obj(struct axi_dmac);
> > > if (!dmac)
> > > return -ENOMEM;
> > >
> > > @@ -1251,6 +1286,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
> > > dma_dev->dev = &pdev->dev;
> > > dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
> > > dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
> > > + dma_dev->device_release = axi_dmac_release;
> > > dma_dev->directions = BIT(dmac->chan.direction);
> > > dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> > > dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
> > > @@ -1285,12 +1321,21 @@ static int axi_dmac_probe(struct platform_device *pdev)
> > > if (ret)
> > > return ret;
> > >
> > > + /*
> > > + * From this point on, our dmac object has its lifetime bound to
> > > + * dma_dev. Will be freed when dma_dev refcount goes to 0. That means,
> > > + * no more automatic kfree(). Also note that dmac is now NULL so we
> > > + * need __dmac.
> > > + */
> > > + __dmac = no_free_ptr(dmac);
> > > + get_device(&pdev->dev);
> > > +
> > > /*
> > > * Put the action in here so it get's done before unregistering the DMA
> > > * device.
> > > */
> > > ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
> > > - &dmac->chan.vchan.task);
> > > + &__dmac->chan.vchan.task);
> > > if (ret)
> > > return ret;
> > >
> > > @@ -1304,13 +1349,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
> > > if (ret)
> > > return ret;
> > >
> > > - ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
> > > - IRQF_SHARED, dev_name(&pdev->dev), dmac);
> > > + /* So that we can mark the device as unbound and disable it */
> > > + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_disable, __dmac);
> > > if (ret)
> > > return ret;
> > >
> > > - regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
> > > - &axi_dmac_regmap_config);
> > > + ret = devm_request_irq(&pdev->dev, __dmac->irq, axi_dmac_interrupt_handler,
> > > + IRQF_SHARED, dev_name(&pdev->dev), __dmac);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + regmap = devm_regmap_init_mmio(&pdev->dev, __dmac->base,
> > > + &axi_dmac_regmap_config);
> > >
> > > return PTR_ERR_OR_ZERO(regmap);
> > > }
> > >
> > > --
> > > 2.53.0
> > >