[PATCH 13/18] spi: stm32h7: fix handling of dma transfer completed
From: Alain Volmat
Date: Wed Aug 05 2020 - 03:05:30 EST
From: Amelie Delaunay <amelie.delaunay@xxxxxx>
The rx dma completes only after the last data has been
received from the spi. Thus, to avoid losing rx data, it is
mandatory to wait for the dma callback before tearing down
the rx dma in stm32_spi_disable().

The tx dma, on the other hand, is already complete when the
last data has been sent by the spi. But both spi and dma use
threaded interrupts, so there is no guarantee that the dma irq
handler has already run when the spi irq handler triggers
stm32_spi_disable(). Waiting for the dma callback allows a
clean termination of the dma in both cases.

Remove the unused code from the current dma callback, signal
the end of dma through a completion, and delay the spi disable
until the dma callback has run.
Signed-off-by: Antonio Borneo <antonio.borneo@xxxxxx>
Signed-off-by: Amelie Delaunay <amelie.delaunay@xxxxxx>
Signed-off-by: Alain Volmat <alain.volmat@xxxxxx>
---
drivers/spi/spi-stm32.c | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
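
Note (illustration only, not part of the patch): the fix chains two
completions so that the jiffies left over from the wait on
xfer_completion bound the wait on dma_completion, keeping the overall
timeout at xfer_time. The kernel-style sketch below only shows that
pattern; the names my_spi, my_dma_done_cb and my_wait_for_transfer are
made up and are not the driver's code.

/*
 * Illustrative sketch only -- not the driver code. It shows how the
 * patch chains two completions so the dma callback is guaranteed to
 * have run before the controller is torn down.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct my_spi {
	struct completion xfer_completion;	/* end of transfer (EOT) */
	struct completion dma_completion;	/* dma callback has run */
	bool cur_usedma;
};

/* dma engine callback: just signal that the descriptor completed */
static void my_dma_done_cb(void *data)
{
	struct my_spi *spi = data;

	complete(&spi->dma_completion);
}

/*
 * Wait for the transfer first, then, within the jiffies left over
 * from that wait, for the dma callback. Returns 0 on success or
 * -ETIMEDOUT if either wait expires.
 */
static int my_wait_for_transfer(struct my_spi *spi, unsigned int xfer_time_ms)
{
	unsigned long timeout = msecs_to_jiffies(xfer_time_ms);

	timeout = wait_for_completion_timeout(&spi->xfer_completion, timeout);
	if (timeout && spi->cur_usedma)
		timeout = wait_for_completion_timeout(&spi->dma_completion,
						      timeout);

	return timeout ? 0 : -ETIMEDOUT;
}

As in the patch, the dma callback only calls complete(); dma_completion
is init_completion()'d once at probe and reinit_completion()'d before
the descriptors are submitted, so a late threaded dma irq can no longer
race with stm32_spi_disable().
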
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 1a703c4a65db..b0a9642392e9 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -275,6 +275,7 @@ struct stm32_spi_cfg {
* @rx_len: number of data to be read in bytes
* @dma_tx: dma channel for TX transfer
* @dma_rx: dma channel for RX transfer
+ * @dma_completion: completion to wait for end of DMA transfer
* @phys_addr: SPI registers physical base address
* @xfer_completion: completion to wait for end of transfer
* @xfer_status: current transfer status
@@ -304,6 +305,7 @@ struct stm32_spi {
int rx_len;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
+ struct completion dma_completion;
dma_addr_t phys_addr;
struct completion xfer_completion;
int xfer_status;
@@ -1062,25 +1064,18 @@ static void stm32f4_spi_dma_rx_cb(void *data)
* stm32h7_spi_dma_cb - dma callback
* @data: pointer to the spi controller data structure
*
- * DMA callback is called when the transfer is complete or when an error
- * occurs. If the transfer is complete, EOT flag is raised.
+ * DMA callback is called when the transfer is complete.
*/
static void stm32h7_spi_dma_cb(void *data)
{
struct stm32_spi *spi = data;
unsigned long flags;
- u32 sr;
spin_lock_irqsave(&spi->lock, flags);
- sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ complete(&spi->dma_completion);
spin_unlock_irqrestore(&spi->lock, flags);
-
- if (!(sr & STM32H7_SPI_SR_EOT))
- dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
-
- /* Now wait for EOT, or SUSP or OVR in case of error */
}
/**
@@ -1274,12 +1269,20 @@ static int stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
struct spi_transfer *xfer)
{
+ dma_async_tx_callback rx_done = NULL, tx_done = NULL;
struct dma_slave_config tx_dma_conf, rx_dma_conf;
struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
+ if (spi->rx_buf)
+ rx_done = spi->cfg->dma_rx_cb;
+ else if (spi->tx_buf)
+ tx_done = spi->cfg->dma_tx_cb;
+
+ reinit_completion(&spi->dma_completion);
+
rx_dma_desc = NULL;
if (spi->rx_buf && spi->dma_rx) {
stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
@@ -1316,7 +1319,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
goto dma_desc_error;
if (rx_dma_desc) {
- rx_dma_desc->callback = spi->cfg->dma_rx_cb;
+ rx_dma_desc->callback = rx_done;
rx_dma_desc->callback_param = spi;
if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
@@ -1330,7 +1333,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
if (tx_dma_desc) {
if (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX) {
- tx_dma_desc->callback = spi->cfg->dma_tx_cb;
+ tx_dma_desc->callback = tx_done;
tx_dma_desc->callback_param = spi;
}
@@ -1658,6 +1661,7 @@ static int stm32_spi_transfer_one(struct spi_master *master,
{
struct stm32_spi *spi = spi_master_get_devdata(master);
u32 xfer_time, midi_delay_ns;
+ unsigned long timeout;
int ret;
spi->tx_buf = transfer->tx_buf;
@@ -1690,10 +1694,14 @@ static int stm32_spi_transfer_one(struct spi_master *master,
midi_delay_ns = spi->cur_xferlen * 8 / spi->cur_bpw * spi->cur_midi;
xfer_time += DIV_ROUND_UP(midi_delay_ns, NSEC_PER_MSEC);
xfer_time = max(2 * xfer_time, 100U);
+ timeout = msecs_to_jiffies(xfer_time);
+
+ timeout = wait_for_completion_timeout(&spi->xfer_completion, timeout);
+ if (timeout && spi->cur_usedma)
+ timeout = wait_for_completion_timeout(&spi->dma_completion,
+ timeout);
- ret = wait_for_completion_timeout(&spi->xfer_completion,
- (msecs_to_jiffies(xfer_time)));
- if (!ret) {
+ if (!timeout) {
dev_err(spi->dev, "SPI transfer timeout (%u ms)\n", xfer_time);
spi->xfer_status = -ETIMEDOUT;
}
@@ -1854,6 +1862,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->master = master;
spin_lock_init(&spi->lock);
init_completion(&spi->xfer_completion);
+ init_completion(&spi->dma_completion);
spi->cfg = (const struct stm32_spi_cfg *)
of_match_device(pdev->dev.driver->of_match_table,
--
2.7.4