Re: [PATCH v5 2/3] dmaengine: amlogic: Add general DMA driver for A9
From: Xianwei Zhao
Date: Thu Mar 05 2026 - 00:22:40 EST
On 2026/3/5 12:17, Frank Li wrote:
On Thu, Mar 05, 2026 at 11:35:15AM +0800, Xianwei Zhao wrote:
Hi Frank,

Is the data length the total transfer size?
Thanks for your review.
On 2026/3/4 23:48, Frank Li wrote:
On Wed, Mar 04, 2026 at 06:14:13AM +0000, Xianwei Zhao wrote:
This DMA hardware does not have an OWNER bit.
Amlogic A9 SoCs include a general-purpose DMA controller that can be used...
by multiple peripherals, such as I2C PIO and I3C. Each peripheral group
is associated with a dedicated DMA channel in hardware.
Signed-off-by: Xianwei Zhao<xianwei.zhao@xxxxxxxxxxx>
---
drivers/dma/Kconfig | 9 +
drivers/dma/Makefile | 1 +
drivers/dma/amlogic-dma.c | 585 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 595 insertions(+)
regmap_set_bits()
+static int aml_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+ size_t size = size_mul(sizeof(struct aml_dma_sg_link), DMA_MAX_LINK);
+
+ aml_chan->sg_link = dma_alloc_coherent(aml_dma->dma_device.dev, size,
+ &aml_chan->sg_link_phys, GFP_KERNEL);
+ if (!aml_chan->sg_link)
+ return -ENOMEM;
+
+ /* offset is the same RCH_CFG and WCH_CFG */
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG, CFG_CLEAR, CFG_CLEAR);
+ aml_chan->status = DMA_COMPLETE;
regmap_clear_bits();
+ dma_async_tx_descriptor_init(&aml_chan->desc, chan);
+ aml_chan->desc.tx_submit = aml_dma_tx_submit;
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG, CFG_CLEAR, 0);
+...
+ return 0;
+}
+
+/*
+ * Free the per-channel coherent sg-link table allocated in
+ * aml_dma_alloc_chan_resources() and mark the channel idle.
+ */
+static void aml_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+
+ aml_chan->status = DMA_COMPLETE;
+ /* use size_mul() so the size expression matches the allocation side */
+ dma_free_coherent(aml_dma->dma_device.dev,
+ size_mul(sizeof(struct aml_dma_sg_link), DMA_MAX_LINK),
+ aml_chan->sg_link, aml_chan->sg_link_phys);
+ /* guard against accidental use-after-free of the link table */
+ aml_chan->sg_link = NULL;
+}
+
Suggest a dma_wmb() here to make sure the previous writes complete before the update.
+static struct dma_async_tx_descriptor *aml_dma_prep_slave_sg
+ (struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+ struct aml_dma_sg_link *sg_link;
+ struct scatterlist *sg;
+ int idx = 0;
+ u64 paddr;
+ u32 reg, link_count, avail, chan_id;
+ u32 i;
+
+ if (aml_chan->direction != direction) {
+ dev_err(aml_dma->dma_device.dev, "direction not support\n");
+ return NULL;
+ }
+
+ switch (aml_chan->status) {
+ case DMA_IN_PROGRESS:
+ dev_err(aml_dma->dma_device.dev, "not support multi tx_desciptor\n");
+ return NULL;
+
+ case DMA_COMPLETE:
+ aml_chan->data_len = 0;
+ chan_id = aml_chan->chan_id;
+ reg = (direction == DMA_DEV_TO_MEM) ? WCH_INT_MASK : RCH_INT_MASK;
+ regmap_update_bits(aml_dma->regmap, reg, BIT(chan_id), BIT(chan_id));
+
+ break;
+ default:
+ dev_err(aml_dma->dma_device.dev, "status error\n");
+ return NULL;
+ }
+
+ link_count = sg_nents_for_dma(sgl, sg_len, SG_MAX_LEN);
+
+ if (link_count > DMA_MAX_LINK) {
+ dev_err(aml_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ sg_len, DMA_MAX_LINK);
+ aml_chan->status = DMA_ERROR;
+ return NULL;
+ }
+
+ aml_chan->status = DMA_IN_PROGRESS;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ avail = sg_dma_len(sg);
+ paddr = sg->dma_address;
+ while (avail > SG_MAX_LEN) {
+ sg_link = &aml_chan->sg_link[idx++];
+ /* set dma address and len to sglink*/
+ sg_link->address = paddr;
+ sg_link->ctl = FIELD_PREP(LINK_LEN, SG_MAX_LEN);
+ paddr = paddr + SG_MAX_LEN;
+ avail = avail - SG_MAX_LEN;
+ }
+ sg_link = &aml_chan->sg_link[idx++];
+ /* set dma address and len to sglink*/
+ sg_link->address = paddr;
Where do you update the OWNER bit to tell the DMA engine that the sg_link is ready?
DMA working steps:
The first step is to prepare the corresponding link memory.
(This is what the aml_dma_prep_slave_sg work involves.)
The second step is to write link phy address into the control register, and
data length into the control register. This will trigger the DMA to work.
Yes.
then DMA decrease when process one item in link?
Yes. When the link with the LINK_EOC flag is processed, the DMA will stop and raise an interrupt.
Frank
For the memory-to-device channel, an additional register needs to be written
to trigger the transfer
(This part is implemented in aml_enable_dma_channel function.)
In v1 and v2 I placed dma_wmb() at the beginning of aml_enable_dma_channel.
You said it was okay not to use it, so I drop it.
+ sg_link->ctl = FIELD_PREP(LINK_LEN, avail);
regmap_set_bits(); check the others as well.
+
+ aml_chan->data_len += sg_dma_len(sg);
+ }
+ aml_chan->sg_link_cnt = idx;
+
+ return &aml_chan->desc;
+}
+
+/*
+ * dmaengine device_pause callback: halt the channel by setting CFG_PAUSE
+ * and record the pre-pause state so the resume callback can restore it.
+ */
+static int aml_dma_pause_chan(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+
+ /* offset is the same for RCH_CFG and WCH_CFG */
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG, CFG_PAUSE, CFG_PAUSE);
+ aml_chan->pre_status = aml_chan->status;...
+ aml_chan->status = DMA_PAUSED;
+
+ return 0;
+}
+
Align the callback name: aml_dma_chan_resume()
+ dma_set_max_seg_size(dma_dev->dev, SG_MAX_LEN);
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_dev->device_alloc_chan_resources = aml_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = aml_dma_free_chan_resources;
+ dma_dev->device_tx_status = aml_dma_tx_status;
+ dma_dev->device_prep_slave_sg = aml_dma_prep_slave_sg;
+
+ dma_dev->device_pause = aml_dma_pause_chan;
+ dma_dev->device_resume = aml_dma_resume_chan;
+ dma_dev->device_terminate_all = aml_dma_terminate_all;
aml_dma_issue_pending()
+ dma_dev->device_issue_pending = aml_dma_enable_chan;
Frank
+ /* PIO 4 bytes and I2C 1 byte */
...
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+ dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
--
2.52.0