[RFC PATCH 3/6] dmaengine: ti: k3-udma-glue: Add helpers for batch operations on TX/RX DMA

From: Siddharth Vadapalli

Date: Wed Mar 25 2026 - 09:20:39 EST


To allow pushing and popping a batch of DMA Descriptors on the Transmit
and Receive DMA Channels (Flows), introduce four helpers:
1. k3_udma_glue_push_tx_chn_batch
2. k3_udma_glue_pop_tx_chn_batch
3. k3_udma_glue_push_rx_chn_batch
4. k3_udma_glue_pop_rx_chn_batch

Signed-off-by: Siddharth Vadapalli <s-vadapalli@xxxxxx>
---
drivers/dma/ti/k3-udma-glue.c | 55 ++++++++++++++++++++++++++++++++
include/linux/dma/k3-udma-glue.h | 12 +++++++
2 files changed, 67 insertions(+)

diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index f87d244cc2d6..15835c521977 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -485,6 +485,25 @@ int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

+/**
+ * k3_udma_glue_push_tx_chn_batch - push a batch of TX DMA descriptors
+ * @tx_chn: TX channel
+ * @desc_tx: array of @batch_size prepared host descriptors
+ * @desc_dma: array of @batch_size descriptor DMA addresses
+ * @batch_size: number of descriptors to push
+ *
+ * Reserves @batch_size slots from the channel's free packet budget, sets
+ * the return policy of every descriptor to the TX completion ring and
+ * pushes the whole batch to the TX ring.
+ *
+ * Returns 0 on success, -ENOMEM if fewer than @batch_size free slots are
+ * available, or the error returned by k3_ringacc_ring_push_batch().
+ */
+int k3_udma_glue_push_tx_chn_batch(struct k3_udma_glue_tx_channel *tx_chn,
+				   struct cppi5_host_desc_t **desc_tx,
+				   dma_addr_t *desc_dma, u32 batch_size)
+{
+	u32 ringtxcq_id;
+	int pkts;
+	u32 i;
+	int ret;
+
+	/*
+	 * Reserve all @batch_size slots atomically. atomic_add_unless(.., 0)
+	 * only refuses when the counter is exactly zero, so it would let
+	 * free_pkts go negative whenever fewer than @batch_size slots remain.
+	 */
+	do {
+		pkts = atomic_read(&tx_chn->free_pkts);
+		if (pkts < (int)batch_size)
+			return -ENOMEM;
+	} while (atomic_cmpxchg(&tx_chn->free_pkts, pkts,
+				pkts - (int)batch_size) != pkts);
+
+	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+	for (i = 0; i < batch_size; i++)
+		cppi5_desc_set_retpolicy(&desc_tx[i]->hdr, 0, ringtxcq_id);
+
+	ret = k3_ringacc_ring_push_batch(tx_chn->ringtx, desc_dma, batch_size);
+	if (ret)
+		/* Nothing was queued; return the reserved slots. */
+		atomic_add(batch_size, &tx_chn->free_pkts);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn_batch);
+
int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
dma_addr_t *desc_dma)
{
@@ -498,6 +517,21 @@ int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

+/**
+ * k3_udma_glue_pop_tx_chn_batch - pop a batch of completed TX descriptors
+ * @tx_chn: TX channel
+ * @desc_dma: out array; receives up to @max_batch descriptor DMA addresses
+ * @batch_size: out; number of descriptors actually popped
+ * @max_batch: capacity of the @desc_dma array
+ *
+ * Pops completed descriptors from the TX completion ring. On success the
+ * popped slots are returned to the free_pkts budget consumed by the push
+ * path.
+ *
+ * Returns 0 on success or the error from k3_ringacc_ring_pop_batch().
+ */
+int k3_udma_glue_pop_tx_chn_batch(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma, u32 *batch_size,
+ u32 max_batch)
+{
+ int ret;
+
+ ret = k3_ringacc_ring_pop_batch(tx_chn->ringtxcq, desc_dma, batch_size,
+ max_batch);
+ if (!ret)
+ atomic_add(*batch_size, &tx_chn->free_pkts);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn_batch);
+
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
int ret;
@@ -1512,6 +1546,16 @@ int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

+/**
+ * k3_udma_glue_push_rx_chn_batch - push a batch of RX descriptors to a flow
+ * @rx_chn: RX channel
+ * @flow_num: flow index within @rx_chn
+ * @desc_dma: array of @batch_size descriptor DMA addresses
+ * @batch_size: number of descriptors to push
+ *
+ * Pushes @batch_size free-descriptor DMA addresses to the flow's FDQ ring.
+ *
+ * Returns 0 on success or the error from k3_ringacc_ring_push_batch().
+ */
+int k3_udma_glue_push_rx_chn_batch(struct k3_udma_glue_rx_channel *rx_chn,
+				   u32 flow_num, dma_addr_t *desc_dma,
+				   u32 batch_size)
+{
+	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+	/*
+	 * The batch push consumes @batch_size addresses, so the parameter
+	 * must be an array: taking a single dma_addr_t by value and passing
+	 * &desc_dma would read past it for any batch_size > 1.
+	 */
+	return k3_ringacc_ring_push_batch(flow->ringrxfdq, desc_dma, batch_size);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn_batch);
+
int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, dma_addr_t *desc_dma)
{
@@ -1521,6 +1565,17 @@ int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

+/**
+ * k3_udma_glue_pop_rx_chn_batch - pop a batch of RX descriptors from a flow
+ * @rx_chn: RX channel
+ * @flow_num: flow index within @rx_chn
+ * @desc_dma: out array; receives up to @max_batch descriptor DMA addresses
+ * @batch_size: out; number of descriptors actually popped
+ * @max_batch: capacity of the @desc_dma array
+ *
+ * Returns 0 on success or the error from k3_ringacc_ring_pop_batch().
+ */
+int k3_udma_glue_pop_rx_chn_batch(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma,
+ u32 *batch_size, u32 max_batch)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_pop_batch(flow->ringrx, desc_dma, batch_size,
+ max_batch);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn_batch);
+
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num)
{
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 5d43881e6fb7..9fe3f51c230c 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -35,8 +35,14 @@ void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
struct cppi5_host_desc_t *desc_tx,
dma_addr_t desc_dma);
+int k3_udma_glue_push_tx_chn_batch(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t **desc_tx,
+ dma_addr_t *desc_dma, u32 batch_size);
int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
dma_addr_t *desc_dma);
+int k3_udma_glue_pop_tx_chn_batch(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma, u32 *batch_size,
+ u32 max_batch);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
@@ -127,8 +133,14 @@ void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, struct cppi5_host_desc_t *desc_tx,
dma_addr_t desc_dma);
+/* @desc_dma is an array of @batch_size descriptor DMA addresses. */
+int k3_udma_glue_push_rx_chn_batch(struct k3_udma_glue_rx_channel *rx_chn,
+				   u32 flow_num, dma_addr_t *desc_dma,
+				   u32 batch_size);
int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_pop_rx_chn_batch(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma,
+ u32 *batch_size, u32 max_batch);
int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
--
2.51.1