We can use GPI DMA for devices where it is enabled by firmware. Add
support for this mode: the supported transfer mode is queried at probe
time, and when the GPI DMA channels are available, transfers are mapped
onto them instead of the FIFO path.

Signed-off-by: Vinod Koul <vkoul@xxxxxxxxxx>
---
drivers/spi/spi-geni-qcom.c | 395 +++++++++++++++++++++++++++++++++++-
1 file changed, 384 insertions(+), 11 deletions(-)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..5bb0e2192734 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -328,8 +609,34 @@ static int spi_geni_init(struct spi_geni_master *mas)
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
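+
+	/*
+	 * GPI DMA can be used when the firmware has set up the QUP for GSI;
+	 * the channels are described by the "tx" and "rx" dma-names entries.
+	 */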
+	mas->tx = dma_request_chan(mas->dev, "tx");
+	if (IS_ERR(mas->tx)) {
+		ret = PTR_ERR(mas->tx);
+		dev_err(mas->dev, "Failed to get tx DMA ch %d\n", ret);
+		goto out_pm;
+	}
+
+	mas->rx = dma_request_chan(mas->dev, "rx");
+	if (IS_ERR(mas->rx)) {
+		ret = PTR_ERR(mas->rx);
+		dev_err(mas->dev, "Failed to get rx DMA ch %d\n", ret);
+		dma_release_channel(mas->tx);
+		mas->tx = NULL;
+		goto out_pm;
+	}
+
+	gsi_sz = sizeof(struct spi_geni_gsi) * NUM_SPI_XFER;
+	mas->gsi = devm_kzalloc(mas->dev, gsi_sz, GFP_KERNEL);
+	if (!mas->gsi) {
+		dma_release_channel(mas->tx);
+		dma_release_channel(mas->rx);
+		mas->tx = NULL;
+		mas->rx = NULL;
+		ret = -ENOMEM;
+		goto out_pm;
+	}
+
+out_pm:
pm_runtime_put(mas->dev);
- return 0;
+ return ret;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
@@ -420,6 +727,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
{
u32 m_cmd = 0;
u32 len;
+ u32 m_param = 0;
struct geni_se *se = &mas->se;
int ret;
@@ -457,6 +765,11 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
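+
+	/*
+	 * Keep CS asserted across back-to-back transfers of a message by
+	 * marking all but the last transfer as a fragment, unless a CS
+	 * change was requested.
+	 */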
+	if (!xfer->cs_change &&
+	    !list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+		m_param |= FRAGMENTATION;
+
mas->cur_xfer = xfer;
if (xfer->tx_buf) {
m_cmd |= SPI_TX_ONLY;
@@ -475,7 +788,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
- geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
+ geni_se_setup_m_cmd(se, m_cmd, m_param);
/*
* TX_WATERMARK_REG should be set after SPI configuration and
@@ -494,13 +807,52 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	unsigned long time_left;
+	int ret = 0, i;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
- return 0;
+ return ret;
+
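+	/*
+	 * FIFO transfers complete from the interrupt handler. For GSI, queue
+	 * the transfer and, once the last transfer of the message (or the
+	 * NUM_SPI_XFER queue limit) is reached, wait for the DMA engine to
+	 * signal each Tx/Rx EOT before finalizing.
+	 */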
+	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+		setup_fifo_xfer(xfer, mas, slv->mode, spi);
+		/* Return 1 so the core waits for spi_finalize_current_transfer() */
+		return 1;
+	}
+
+	setup_gsi_xfer(xfer, mas, slv, spi);
+	if (mas->num_xfers >= NUM_SPI_XFER ||
+	    list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers)) {
+		for (i = 0; i < mas->num_tx_eot; i++) {
+			time_left = wait_for_completion_timeout(&mas->tx_cb,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+			if (!time_left) {
+				dev_err(mas->dev, "Tx[%d] timeout\n", i);
+				ret = -ETIMEDOUT;
+				goto err_gsi_geni_transfer_one;
+			}
+			spi_finalize_current_transfer(spi);
+		}
+		for (i = 0; i < mas->num_rx_eot; i++) {
+			time_left = wait_for_completion_timeout(&mas->rx_cb,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+			if (!time_left) {
+				dev_err(mas->dev, "Rx[%d] timeout\n", i);
+				ret = -ETIMEDOUT;
+				goto err_gsi_geni_transfer_one;
+			}
+			spi_finalize_current_transfer(spi);
+		}
+		if (mas->qn_err) {
+			ret = -EIO;
+			mas->qn_err = false;
+			goto err_gsi_geni_transfer_one;
+		}
+	}
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ return ret;
+
+err_gsi_geni_transfer_one:
+	dmaengine_terminate_all(mas->tx);
+	dmaengine_terminate_all(mas->rx);
+ return ret;
}
static irqreturn_t geni_spi_isr(int irq, void *data)
@@ -595,6 +947,15 @@ static int spi_geni_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
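+	/* Prefer 64-bit DMA addressing for GPI DMA, falling back to 32-bit */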
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
+ }
+ }
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -632,15 +993,18 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->num_chipselect = 4;
spi->max_speed_hz = 50000000;
spi->prepare_message = spi_geni_prepare_message;
+ spi->unprepare_message = spi_geni_unprepare_message;
spi->transfer_one = spi_geni_transfer_one;
spi->auto_runtime_pm = true;
spi->handle_err = handle_fifo_timeout;
- spi->set_cs = spi_geni_set_cs;
spi->use_gpio_descriptors = true;
init_completion(&mas->cs_done);
init_completion(&mas->cancel_done);
init_completion(&mas->abort_done);
+ init_completion(&mas->xfer_done);
+ init_completion(&mas->tx_cb);
+ init_completion(&mas->rx_cb);
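+	/* tx_cb and rx_cb are completed as the DMA engine signals each Tx/Rx EOT */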
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
@@ -661,6 +1025,15 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_probe_runtime_disable;
+	/*
+	 * Query the supported transfer mode, and set set_cs only for FIFO
+	 * mode. In DMA (GSI) mode, the GSI engine asserts CS based on the
+	 * params passed in the TRE.
+	 */
+ mas->cur_xfer_mode = get_xfer_mode(spi);
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ spi->set_cs = spi_geni_set_cs;
+
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_probe_runtime_disable;