[PATCH 3/3] dmaengine: ti: k3-udma: Fix early TX completion against PDMAs
From: Peter Ujfalusi
Date: Mon Dec 02 2019 - 15:31:48 EST
If the peripheral is disabled (or is unable to send out data), the
UDMAP will complete a 'short' transfer. In other words: if the amount
of data fits into the PSI-L and PDMA (and peripheral FIFO) buffers,
UDMAP sends out the data and reports the transfer as completed, even
though the peripheral has not actually received all the data.
It was wrong to issue a normal teardown on the channel, for several
reasons:
- UDMAP is not processing any packet, so it will just return the TDCM;
  if the peripheral is not consuming data from the PDMA, we get a
  constant flood of TDCMs (interrupts).
- After the teardown the channel is in reset state and the rings would
  need to be reset as well, which cannot be done in interrupt context.
- If the peripheral is merely slow to consume the data, or there is a
  delay before it starts draining, we again have trouble detecting the
  real state.

We could use a forced teardown instead, but that makes the PDMA
discard the data, which is not correct when the transfer start on the
peripheral is merely slow or delayed.
The only solution is to use a delayed work and check the progress
there whenever a descriptor has been returned but the UDMA and PDMA
counters do not show the same number of bytes processed.
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@xxxxxx>
---
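
For reference, the re-check heuristic boils down to the following
stand-alone sketch (hypothetical helper name and numbers, plus an
extra guard against a zero drain rate; the driver code below operates
on the tx_drain snapshots directly):

#include <stdio.h>

#define HZ 250	/* assumed tick rate, jiffies per second */

/*
 * Pick the delay (in jiffies) before the next completion check:
 * estimate the drain rate from the progress made between two checks
 * (elapsed_jiffies must be nonzero) and come back after about a third
 * of the estimated remaining drain time; anything under ~5ms is not
 * worth deferring for.
 */
static unsigned long next_check_delay(unsigned int prev_residue,
				      unsigned int residue,
				      unsigned long elapsed_jiffies)
{
	unsigned int rate = (prev_residue - residue) / elapsed_jiffies;
	unsigned long delay;

	if (!rate)
		return HZ;	/* no measurable progress, retry in 1s */

	delay = residue / rate / 3;
	if (delay * 1000 / HZ < 5)	/* open-coded jiffies_to_msecs() */
		delay = 0;

	return delay;
}

int main(void)
{
	/* 1024 of 4096 pending bytes drained over two jiffies */
	printf("delay: %lu jiffies\n", next_check_delay(4096, 3072, 2));
	return 0;
}
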
drivers/dma/ti/k3-udma.c | 74 ++++++++++++++++++++++++++++++----------
1 file changed, 56 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 3aede5db9604..39ca371a67dd 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -165,10 +165,15 @@ struct udma_desc {
 enum udma_chan_state {
 	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
 	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
-	UDMA_CHAN_IS_ACTIVE_FLUSH, /* Flushing for delayed tx */
 	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
 };

+struct udma_tx_drain {
+	struct delayed_work work;
+	unsigned long jiffie;
+	u32 residue;
+};
+
 struct udma_chan {
 	struct virt_dma_chan vc;
 	struct dma_slave_config cfg;
@@ -193,6 +198,8 @@ struct udma_chan {
 	enum udma_chan_state state;
 	struct completion teardown_completed;

+	struct udma_tx_drain tx_drain;
+
 	u32 bcnt; /* number of bytes completed since the start of the channel */
 	u32 in_ring_cnt; /* number of descriptors in flight */
@@ -928,22 +935,51 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
 	peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
 	bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);

-	if (peer_bcnt < bcnt)
+	if (peer_bcnt < bcnt) {
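+		/* snapshot the progress for udma_check_tx_completion() */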
+		uc->tx_drain.residue = bcnt - peer_bcnt;
+		uc->tx_drain.jiffie = jiffies;
 		return false;
+	}

 	return true;
 }

-static void udma_flush_tx(struct udma_chan *uc)
+static void udma_check_tx_completion(struct work_struct *work)
 {
-	if (uc->dir != DMA_MEM_TO_DEV)
-		return;
+	struct udma_chan *uc = container_of(work, typeof(*uc),
+					    tx_drain.work.work);
+	bool desc_done = true;
+	u32 residue_diff;
+	unsigned long jiffie_diff, delay;
+
+	if (uc->desc) {
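+		/* remember the snapshot taken at the previous check */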
+		residue_diff = uc->tx_drain.residue;
+		jiffie_diff = uc->tx_drain.jiffie;
+		desc_done = udma_is_desc_really_done(uc, uc->desc);
+	}
+
+	if (!desc_done) {
+		jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+		residue_diff -= uc->tx_drain.residue;
+		if (residue_diff) {
+			/* Try to guess when we should check next time */
+			residue_diff /= jiffie_diff;
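+			/* wait about a third of the estimated drain time */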
+			delay = uc->tx_drain.residue / residue_diff / 3;
+			if (jiffies_to_msecs(delay) < 5)
+				delay = 0;
+		} else {
+			/* No progress, check again in 1 second */
+			delay = HZ;
+		}
-	uc->state = UDMA_CHAN_IS_ACTIVE_FLUSH;
+		schedule_delayed_work(&uc->tx_drain.work, delay);
+	} else if (uc->desc) {
+		struct udma_desc *d = uc->desc;
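+
+		/* the peripheral caught up, complete the descriptor */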
-	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
-			   UDMA_CHAN_RT_CTL_EN |
-			   UDMA_CHAN_RT_CTL_TDOWN);
+		uc->bcnt += d->residue;
+		udma_start(uc);
+		vchan_cookie_complete(&d->vd);
+	}
 }

 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -973,11 +1009,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 		if (!uc->desc)
 			udma_start(uc);

-		if (uc->state != UDMA_CHAN_IS_ACTIVE_FLUSH)
-			goto out;
-		else if (uc->desc)
-			paddr = udma_curr_cppi5_desc_paddr(uc->desc,
-							   uc->desc->desc_idx);
+		goto out;
 	}

 	d = udma_udma_desc_from_paddr(uc, paddr);
@@ -997,7 +1029,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 				vchan_cyclic_callback(&d->vd);
 			}
 		} else {
-			bool desc_done = true;
+			bool desc_done = false;

 			if (d == uc->desc) {
 				desc_done = udma_is_desc_really_done(uc, d);
@@ -1006,10 +1038,9 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 					uc->bcnt += d->residue;
 					udma_start(uc);
 				} else {
-					udma_flush_tx(uc);
+					schedule_delayed_work(&uc->tx_drain.work,
+							      0);
 				}
-			} else if (d == uc->terminated_desc) {
-				uc->terminated_desc = NULL;
 			}

 			if (desc_done)
@@ -1818,6 +1849,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 	udma_reset_rings(uc);

+	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+				  udma_check_tx_completion);
 	return 0;

 err_irq_free:
@@ -2727,6 +2760,7 @@ static int udma_terminate_all(struct dma_chan *chan)
 		uc->terminated_desc = uc->desc;
 		uc->desc = NULL;
 		uc->terminated_desc->terminated = true;
+		cancel_delayed_work(&uc->tx_drain.work);
 	}

 	uc->paused = false;
@@ -2760,6 +2794,7 @@ static void udma_synchronize(struct dma_chan *chan)
 	if (udma_is_chan_running(uc))
 		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

+	cancel_delayed_work_sync(&uc->tx_drain.work);
 	udma_reset_rings(uc);
 }
@@ -2847,6 +2882,9 @@ static void udma_free_chan_resources(struct dma_chan *chan)
 		udma_reset_rings(uc);
 	}

+	cancel_delayed_work_sync(&uc->tx_drain.work);
+	destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
 	if (uc->irq_num_ring > 0) {
 		free_irq(uc->irq_num_ring, uc);
--
Peter