[PATCH RFC v11 12/12] dmaengine: qcom: bam_dma: add support for BAM locking
From: Bartosz Golaszewski
Date: Mon Mar 02 2026 - 11:20:48 EST
Add support for BAM pipe locking. To that end, when starting the DMA on
a channel configured for the DMA_MEM_TO_DEV direction (note: the code
checks DMA_MEM_TO_DEV, not an RX direction — please confirm the intended
wording), wrap the already issued descriptors with additional command
descriptors performing dummy writes to the base register supplied by the
client via dmaengine_slave_config() (if any) alongside the lock/unlock
HW flags.
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@xxxxxxxxxxxxxxxx>
---
drivers/dma/qcom/bam_dma.c | 100 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 99 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 83491e7c2f17d8c9d12a1a055baea7e3a0a75a53..b149cbe9613f0bdc8e26cae4f0cc6922997480d5 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -28,11 +28,13 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_bam_dma.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
@@ -60,6 +62,8 @@ struct bam_desc_hw {
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
+#define DESC_FLAG_LOCK BIT(10)
+#define DESC_FLAG_UNLOCK BIT(9)
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -391,6 +395,12 @@ struct bam_chan {
struct list_head desc_list;
struct list_head node;
+
+ /* BAM locking infrastructure */
+ struct scatterlist lock_sg;
+ struct scatterlist unlock_sg;
+ struct bam_cmd_element lock_ce;
+ struct bam_cmd_element unlock_ce;
};
static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
@@ -1012,14 +1022,92 @@ static void bam_apply_new_config(struct bam_chan *bchan,
bchan->reconfigure = 0;
}
+/*
+ * Build a single-descriptor async transaction that performs a dummy
+ * command-descriptor write to the client-supplied base register
+ * (bchan->slave.dst_addr) carrying the given lock/unlock HW flag.
+ *
+ * Returns the new descriptor (with its cookie already assigned) or NULL
+ * on allocation/mapping/submit failure.  On success the caller owns the
+ * descriptor and its DMA mapping of @sg.
+ */
+static struct bam_async_desc *
+bam_make_lock_desc(struct bam_chan *bchan, struct scatterlist *sg,
+		   struct bam_cmd_element *ce, unsigned int flag)
+{
+	struct dma_chan *chan = &bchan->vc.chan;
+	struct bam_async_desc *async_desc;
+	struct bam_desc_hw *desc;
+	struct virt_dma_desc *vd;
+	struct virt_dma_chan *vc;
+	unsigned int mapped;
+	dma_cookie_t cookie;
+	int ret;
+
+	async_desc = kzalloc_flex(*async_desc, desc, 1, GFP_NOWAIT);
+	if (!async_desc) {
+		dev_err(bchan->bdev->dev, "failed to allocate the BAM lock descriptor\n");
+		return NULL;
+	}
+
+	async_desc->num_desc = 1;
+	async_desc->curr_desc = async_desc->desc;
+	async_desc->dir = DMA_MEM_TO_DEV;
+
+	desc = async_desc->desc;
+
+	bam_prep_ce_le32(ce, bchan->slave.dst_addr, BAM_WRITE_COMMAND, 0);
+	sg_set_buf(sg, ce, sizeof(*ce));
+
+	/*
+	 * NOTE(review): DMA_PREP_CMD is a dmaengine prep flag, not a
+	 * DMA_ATTR_* attribute — confirm this is the intended attrs value.
+	 */
+	mapped = dma_map_sg_attrs(chan->slave, sg, 1, DMA_TO_DEVICE, DMA_PREP_CMD);
+	if (!mapped) {
+		kfree(async_desc);
+		return NULL;
+	}
+
+	/* HW descriptor fields are little-endian, like desc->flags below. */
+	desc->flags |= cpu_to_le16(DESC_FLAG_CMD | flag);
+	desc->addr = cpu_to_le32(sg_dma_address(sg));
+	desc->size = cpu_to_le16(sizeof(struct bam_cmd_element));
+
+	vc = &bchan->vc;
+	vd = &async_desc->vd;
+
+	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+	vd->tx.flags = DMA_PREP_CMD;
+	vd->tx.desc_free = vchan_tx_desc_free;
+	vd->tx_result.result = DMA_TRANS_NOERROR;
+	vd->tx_result.residue = 0;
+
+	cookie = dma_cookie_assign(&vd->tx);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		/* Don't leak the mapping or the descriptor on submit error. */
+		dma_unmap_sg_attrs(chan->slave, sg, 1, DMA_TO_DEVICE,
+				   DMA_PREP_CMD);
+		kfree(async_desc);
+		return NULL;
+	}
+
+	return async_desc;
+}
+
+/*
+ * Wrap the channel's issued descriptors with a LOCK descriptor at the
+ * head and an UNLOCK descriptor at the tail of the issued list, so the
+ * pipe is held locked for the duration of the pending transfers.
+ *
+ * Returns 0 on success or -ENOMEM on failure (nothing is queued then).
+ */
+static int bam_setup_pipe_lock(struct bam_chan *bchan)
+{
+	struct bam_async_desc *lock_desc, *unlock_desc;
+
+	lock_desc = bam_make_lock_desc(bchan, &bchan->lock_sg,
+				       &bchan->lock_ce, DESC_FLAG_LOCK);
+	if (!lock_desc)
+		return -ENOMEM;
+
+	unlock_desc = bam_make_lock_desc(bchan, &bchan->unlock_sg,
+					 &bchan->unlock_ce, DESC_FLAG_UNLOCK);
+	if (!unlock_desc) {
+		/*
+		 * bam_make_lock_desc() mapped lock_sg for DMA; undo that
+		 * mapping before freeing the descriptor or it leaks.
+		 */
+		dma_unmap_sg_attrs(bchan->vc.chan.slave, &bchan->lock_sg, 1,
+				   DMA_TO_DEVICE, DMA_PREP_CMD);
+		kfree(lock_desc);
+		return -ENOMEM;
+	}
+
+	/* LOCK goes in front of, UNLOCK behind, the already issued work. */
+	list_add(&lock_desc->vd.node, &bchan->vc.desc_issued);
+	list_add_tail(&unlock_desc->vd.node, &bchan->vc.desc_issued);
+
+	return 0;
+}
+
/**
* bam_start_dma - start next transaction
* @bchan: bam dma channel
*/
static void bam_start_dma(struct bam_chan *bchan)
{
- struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+ struct virt_dma_desc *vd;
struct bam_device *bdev = bchan->bdev;
+ const struct bam_device_data *bdata = bdev->dev_data;
struct bam_async_desc *async_desc = NULL;
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
@@ -1030,6 +1118,16 @@ static void bam_start_dma(struct bam_chan *bchan)
lockdep_assert_held(&bchan->vc.lock);
+ if (bdata->pipe_lock_supported && bchan->slave.dst_addr &&
+ bchan->slave.direction == DMA_MEM_TO_DEV) {
+ ret = bam_setup_pipe_lock(bchan);
+ if (ret) {
+ dev_err(bdev->dev, "Failed to set up the BAM lock\n");
+ return;
+ }
+ }
+
+ vd = vchan_next_desc(&bchan->vc);
if (!vd)
return;
--
2.47.3