[PATCH] serial: samsung: use dma_ops of DMA if attached

From: Tamseel Shams
Date: Mon Jun 21 2021 - 00:47:51 EST


When DMA is used for TX and RX by the serial driver, the driver
should pass the DMA device pointer to the DMA API instead of the
UART device pointer.

This patch is necessary to fix the SMMU page faults which are
observed when a DMA device (with SMMU enabled) is attached to the
UART for transfers.

Signed-off-by: Tamseel Shams <m.shams@xxxxxxxxxxx>
Signed-off-by: Ajay Kumar <ajaykumar.rs@xxxxxxxxxxx>
---
drivers/tty/serial/samsung_tty.c | 60 +++++++++++++++++++++++++-------
1 file changed, 48 insertions(+), 12 deletions(-)

diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index b923683e6a25..5bdc7dd2a5e2 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -284,8 +284,13 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
struct s3c24xx_uart_dma *dma = ourport->dma;
struct circ_buf *xmit = &port->state->xmit;
struct dma_tx_state state;
+ struct device *dma_map_ops_dev = ourport->port.dev;
int count;

+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->tx_chan)
+ dma_map_ops_dev = dma->tx_chan->device->dev;
+
if (!ourport->tx_enabled)
return;

@@ -298,7 +303,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
dmaengine_pause(dma->tx_chan);
dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
dmaengine_terminate_all(dma->tx_chan);
- dma_sync_single_for_cpu(ourport->port.dev,
+ dma_sync_single_for_cpu(dma_map_ops_dev,
dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);
async_tx_ack(dma->tx_desc);
count = dma->tx_bytes_requested - state.residue;
@@ -324,15 +329,19 @@ static void s3c24xx_serial_tx_dma_complete(void *args)
struct circ_buf *xmit = &port->state->xmit;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct dma_tx_state state;
+ struct device *dma_map_ops_dev = ourport->port.dev;
unsigned long flags;
int count;

+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->tx_chan)
+ dma_map_ops_dev = dma->tx_chan->device->dev;

dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
count = dma->tx_bytes_requested - state.residue;
async_tx_ack(dma->tx_desc);

- dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
+ dma_sync_single_for_cpu(dma_map_ops_dev, dma->tx_transfer_addr,
dma->tx_size, DMA_TO_DEVICE);

spin_lock_irqsave(&port->lock, flags);
@@ -408,7 +417,11 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct device *dma_map_ops_dev = ourport->port.dev;

+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->tx_chan)
+ dma_map_ops_dev = dma->tx_chan->device->dev;

if (ourport->tx_mode != S3C24XX_TX_DMA)
enable_tx_dma(ourport);
@@ -416,7 +429,7 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
dma->tx_transfer_addr = dma->tx_addr + xmit->tail;

- dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,
+ dma_sync_single_for_device(dma_map_ops_dev, dma->tx_transfer_addr,
dma->tx_size, DMA_TO_DEVICE);

dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
@@ -483,12 +496,17 @@ static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
struct tty_port *tty, int count)
{
struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct device *dma_map_ops_dev = ourport->port.dev;
int copied;

+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->rx_chan)
+ dma_map_ops_dev = dma->rx_chan->device->dev;
+
if (!count)
return;

- dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,
+ dma_sync_single_for_cpu(dma_map_ops_dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);

ourport->port.icount.rx += count;
@@ -600,8 +618,13 @@ static void s3c24xx_serial_rx_dma_complete(void *args)
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
{
struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct device *dma_map_ops_dev = ourport->port.dev;

- dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->rx_chan)
+ dma_map_ops_dev = dma->rx_chan->device->dev;
+
+ dma_sync_single_for_device(dma_map_ops_dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);

dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
@@ -983,6 +1006,7 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
struct s3c24xx_uart_dma *dma = p->dma;
struct dma_slave_caps dma_caps;
const char *reason = NULL;
+ struct device *dma_map_ops_dev = p->port.dev;
int ret;

/* Default slave configuration parameters */
@@ -1040,18 +1064,25 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
goto err_release_tx;
}

- dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->rx_chan)
+ dma_map_ops_dev = dma->rx_chan->device->dev;
+
+ dma->rx_addr = dma_map_single(dma_map_ops_dev, dma->rx_buf,
dma->rx_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(p->port.dev, dma->rx_addr)) {
+ if (dma_mapping_error(dma_map_ops_dev, dma->rx_addr)) {
reason = "DMA mapping error for RX buffer";
ret = -EIO;
goto err_free_rx;
}

+ /* Pick dma_ops of DMA device if DMA device is attached */
+ if (dma && dma->tx_chan)
+ dma_map_ops_dev = dma->tx_chan->device->dev;
/* TX buffer */
- dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
+ dma->tx_addr = dma_map_single(dma_map_ops_dev, p->port.state->xmit.buf,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(p->port.dev, dma->tx_addr)) {
+ if (dma_mapping_error(dma_map_ops_dev, dma->tx_addr)) {
reason = "DMA mapping error for TX buffer";
ret = -EIO;
goto err_unmap_rx;
@@ -1060,7 +1091,9 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
return 0;

err_unmap_rx:
- dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,
+ if (dma->rx_chan)
+ dma_map_ops_dev = dma->rx_chan->device->dev;
+ dma_unmap_single(dma_map_ops_dev, dma->rx_addr, dma->rx_size,
DMA_FROM_DEVICE);
err_free_rx:
kfree(dma->rx_buf);
@@ -1077,10 +1110,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
{
struct s3c24xx_uart_dma *dma = p->dma;
+ struct device *dma_map_ops_dev = p->port.dev;

if (dma->rx_chan) {
+ dma_map_ops_dev = dma->rx_chan->device->dev;
dmaengine_terminate_all(dma->rx_chan);
- dma_unmap_single(p->port.dev, dma->rx_addr,
+ dma_unmap_single(dma_map_ops_dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
kfree(dma->rx_buf);
dma_release_channel(dma->rx_chan);
@@ -1088,8 +1123,9 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
}

if (dma->tx_chan) {
+ dma_map_ops_dev = dma->tx_chan->device->dev;
dmaengine_terminate_all(dma->tx_chan);
- dma_unmap_single(p->port.dev, dma->tx_addr,
+ dma_unmap_single(dma_map_ops_dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_release_channel(dma->tx_chan);
dma->tx_chan = NULL;
--
2.17.1