[PATCH 2/2] spi: spi-geni-qcom: Really ensure the previous xfer is done before new one
From: Douglas Anderson
Date: Mon Dec 14 2020 - 19:33:01 EST

In commit 2ee471a1e28e ("spi: spi-geni-qcom: Mo' betta locking") we
added a dance in setup_fifo_xfer() to make sure that the previous
transfer was really done before we set up the next one. However, it
wasn't enough. Specifically, if we had a timeout it's possible that
the previous transfer could still be pending. This could happen if
our interrupt handler was blocked for a long time (an interrupt storm
or someone disabling IRQs for a while). This pending interrupt could
throw off our logic.

Let's really make sure that the previous interrupt isn't still pending
before we start the next transfer.

Fixes: 561de45f72bd ("spi: spi-geni-qcom: Add SPI driver support for GENI based QUP")
Signed-off-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
---
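Reviewer note (below the cut so it won't land in the commit message):
the busy check boils down to "any interrupt that is both asserted and
enabled means the previous command hasn't fully retired, so refuse to
start". Purely as an illustration of that masking logic, here is a
stand-alone toy sketch; the function name and values are made up for
the example, not taken from the driver:

  #include <stdio.h>

  /*
   * Toy model only: treat the controller as busy when any interrupt
   * that is both asserted (status) and enabled (mask) is pending.
   */
  static int prev_xfer_pending(unsigned int irq_status, unsigned int irq_en)
  {
          unsigned int pending = irq_status & irq_en;

          if (pending) {
                  fprintf(stderr, "busy, IRQs pending %#010x\n", pending);
                  return 1;
          }
          return 0;
  }

  int main(void)
  {
          /* Enabled IRQ still asserted: previous command not done yet. */
          printf("%d\n", prev_xfer_pending(0x1, 0x1));    /* prints 1 */
          /* Asserted but masked: quiet enough to start a new transfer. */
          printf("%d\n", prev_xfer_pending(0x1, 0x0));    /* prints 0 */
          return 0;
  }

In the driver the two register reads happen under mas->lock, which is
also what keeps us from peeking at the registers while the IRQ handler
is still mid-flight.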
drivers/spi/spi-geni-qcom.c | 69 ++++++++++++++++++++++++++++---------
1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 6f736e94e9f4..5ef2e9f38ac9 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -145,12 +145,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}
+static int spi_geni_check_busy(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ /*
+ * We grab the spinlock so that if we raced really fast and the IRQ
+ * handler is still running we'll wait for it to exit. This can happen
+ * because the IRQ handler signals completion partway through, so the
+ * next transfer can kick off before the handler has finished running.
+ *
+ * Once we have the spinlock, if we're starting a new transfer we
+ * expect nothing to be pending. The check handles the case where the
+ * previous transfer timed out and then handle_fifo_timeout() itself
+ * timed out, which can happen if the interrupt handler was blocked
+ * for a long time. We don't want to start any new transfers until
+ * everything is fully done.
+ *
+ * We are OK releasing the spinlock after we're done here since (if
+ * we're returning 0 and going ahead with the transfer) we know that
+ * the SPI controller must be in a quiet state.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Busy, IRQs pending %#010x\n",
+ m_irq & m_irq_en);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long time_left;
+ int ret;
if (!(slv->mode & SPI_CS_HIGH))
set_flag = !set_flag;
@@ -158,6 +195,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
+ ret = spi_geni_check_busy(mas);
+ if (ret) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ return;
+ }
+
mas->cs_flag = set_flag;
pm_runtime_get_sync(mas->dev);
@@ -277,8 +320,12 @@ static int setup_fifo_params(struct spi_device *spi_slv,
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
- int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ ret = spi_geni_check_busy(mas);
+ if (ret)
+ return ret;
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
@@ -440,21 +487,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
struct geni_se *se = &mas->se;
int ret;
- /*
- * Ensure that our interrupt handler isn't still running from some
- * prior command before we start messing with the hardware behind
- * its back. We don't need to _keep_ the lock here since we're only
- * worried about racing with out interrupt handler. The SPI core
- * already handles making sure that we're not trying to do two
- * transfers at once or setting a chip select and doing a transfer
- * concurrently.
- *
- * NOTE: we actually _can't_ hold the lock here because possibly we
- * might call clk_set_rate() which needs to be able to sleep.
- */
- spin_lock_irq(&mas->lock);
- spin_unlock_irq(&mas->lock);
-
if (xfer->bits_per_word != mas->cur_bits_per_word) {
spi_setup_word_len(mas, mode, xfer->bits_per_word);
mas->cur_bits_per_word = xfer->bits_per_word;
@@ -511,6 +543,11 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ ret = spi_geni_check_busy(mas);
+ if (ret)
+ return ret;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
--
2.29.2.684.gfbc64c5ab5-goog