[PATCH 03/13] tty: serial: atmel: Call dma_async_issue_pending()

From: Tudor Ambarus
Date: Tue Nov 16 2021 - 06:21:37 EST


The driver wrongly assumed that tx_submit() starts the transfer, which
is no longer the case now that the at_xdmac driver is fixed. tx_submit()
is supposed to push the current transaction descriptor to a pending
queue, waiting for issue_pending() to be called; it is issue_pending(),
not tx_submit(), that must start the transfer. While touching
atmel_prepare_rx_dma(), introduce a local variable for the RX DMA
channel.
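
For reference, the expected dmaengine client sequence looks roughly like
the sketch below. This is an illustrative example only, not code from
this driver: example_start_tx(), its parameters and the chosen error
codes are placeholders.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Minimal sketch of a slave TX submission. "chan", "sgl" and "nents"
 * stand in for whatever the client driver has already mapped and
 * configured.
 */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* dmaengine_submit() only queues the descriptor (tx_submit()). */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* The controller does not start until issue_pending() is called. */
	dma_async_issue_pending(chan);

	return 0;
}

The same ordering applies to the cyclic RX descriptor prepared in
atmel_prepare_rx_dma(): submit it, then call dma_async_issue_pending()
on the RX channel.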

Fixes: 34df42f59a60 ("serial: at91: add rx dma support")
Fixes: 08f738be88bb ("serial: at91: add tx dma support")
Signed-off-by: Tudor Ambarus <tudor.ambarus@xxxxxxxxxxxxx>
---
 drivers/tty/serial/atmel_serial.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 376f7a9c2868..b3e593f3c17f 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1009,6 +1009,8 @@ static void atmel_tx_dma(struct uart_port *port)
 				atmel_port->cookie_tx);
 			return;
 		}
+
+		dma_async_issue_pending(chan);
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1191,6 +1193,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
 static int atmel_prepare_rx_dma(struct uart_port *port)
 {
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	struct dma_chan *chan_rx;
 	struct device *mfd_dev = port->dev->parent;
 	struct dma_async_tx_descriptor *desc;
 	dma_cap_mask_t mask;
@@ -1203,11 +1206,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_CYCLIC, mask);
 
-	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
-	if (atmel_port->chan_rx == NULL)
+	chan_rx = dma_request_slave_channel(mfd_dev, "rx");
+	if (chan_rx == NULL)
 		goto chan_err;
+	atmel_port->chan_rx = chan_rx;
+
 	dev_info(port->dev, "using %s for rx DMA transfers\n",
-		 dma_chan_name(atmel_port->chan_rx));
+		 dma_chan_name(chan_rx));
 
 	spin_lock_init(&atmel_port->lock_rx);
 	sg_init_table(&atmel_port->sg_rx, 1);
@@ -1239,8 +1244,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 	config.src_addr = port->mapbase + ATMEL_US_RHR;
 	config.src_maxburst = 1;
 
-	ret = dmaengine_slave_config(atmel_port->chan_rx,
-				     &config);
+	ret = dmaengine_slave_config(chan_rx, &config);
 	if (ret) {
 		dev_err(port->dev, "DMA rx slave configuration failed\n");
 		goto chan_err;
@@ -1249,7 +1253,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 	 * Prepare a cyclic dma transfer, assign 2 descriptors,
 	 * each one is half ring buffer size
 	 */
-	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
+	desc = dmaengine_prep_dma_cyclic(chan_rx,
 					 sg_dma_address(&atmel_port->sg_rx),
 					 sg_dma_len(&atmel_port->sg_rx),
 					 sg_dma_len(&atmel_port->sg_rx)/2,
@@ -1269,12 +1273,14 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 		goto chan_err;
 	}
 
+	dma_async_issue_pending(chan_rx);
+
 	return 0;
 
 chan_err:
 	dev_err(port->dev, "RX channel not available, switch to pio\n");
 	atmel_port->use_dma_rx = false;
-	if (atmel_port->chan_rx)
+	if (chan_rx)
 		atmel_release_rx_dma(port);
 	return -EINVAL;
 }
--
2.25.1