Re: [PATCH v2 2/2] spi: spi-geni-qcom: Do not do DMA map/unmap inside driver, use framework instead
From: Konrad Dybcio
Date: Tue Jun 06 2023 - 12:56:43 EST
On 17.05.2023 14:18, Vijaya Krishna Nivarthi wrote:
> In SE DMA mode, unlike GSI DMA, the spi geni driver does not make use of
> the DMA mapping functionality available in the framework.
> Instead, the driver does the mapping internally, which makes the DMA buffer
> fields available in the spi_transfer struct superfluous while requiring
> additional members in the spi_geni_master struct.
>
> Conform to the design by having the framework handle map/unmap and doing
> only the DMA transfer in the driver; this also simplifies the code a bit.
>
> Fixes: e5f0dfa78ac7 ("spi: spi-geni-qcom: Add support for SE DMA mode")
> Suggested-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
> Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@xxxxxxxxxxx>
> ---
I don't really have good insight into this code, but these changes
seem sane.
Acked-by: Konrad Dybcio <konrad.dybcio@xxxxxxxxxx>
Konrad
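
For context, the framework flow this patch leans on: when ->can_dma()
returns true for a transfer, the SPI core maps tx_buf/rx_buf into
xfer->tx_sg/xfer->rx_sg before ->transfer_one() runs and unmaps them again
after the message completes. A minimal sketch of what the SE DMA start then
reduces to on the driver side (illustrative only, the function name is made
up; geni_se_*_init_dma() are the new geni interfaces this series adds):

    /*
     * Sketch, not from the patch: program SE DMA from the scatterlists
     * the core has already mapped. Assumes <linux/spi/spi.h>,
     * <linux/scatterlist.h> and the GENI SE header are included.
     */
    static void example_se_dma_start(struct geni_se *se, struct spi_transfer *xfer)
    {
    	if (xfer->rx_buf)
    		geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
    				    sg_dma_len(xfer->rx_sg.sgl));
    	if (xfer->tx_buf)
    		geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
    				    sg_dma_len(xfer->tx_sg.sgl));
    }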
> v1 -> v2:
> - pass dma address to new geni interfaces instead of pointer
> - set max_dma_len as per HPG
> - remove expendable local variables
> ---
> drivers/spi/spi-geni-qcom.c | 103 +++++++++++++++++++++-----------------------
> 1 file changed, 50 insertions(+), 53 deletions(-)
>
> diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
> index e423efc..206cc04 100644
> --- a/drivers/spi/spi-geni-qcom.c
> +++ b/drivers/spi/spi-geni-qcom.c
> @@ -97,8 +97,6 @@ struct spi_geni_master {
> struct dma_chan *tx;
> struct dma_chan *rx;
> int cur_xfer_mode;
> - dma_addr_t tx_se_dma;
> - dma_addr_t rx_se_dma;
> };
>
> static int get_spi_clk_cfg(unsigned int speed_hz,
> @@ -174,7 +172,7 @@ static void handle_se_timeout(struct spi_master *spi,
> unmap_if_dma:
> if (mas->cur_xfer_mode == GENI_SE_DMA) {
> if (xfer) {
> - if (xfer->tx_buf && mas->tx_se_dma) {
> + if (xfer->tx_buf) {
> spin_lock_irq(&mas->lock);
> reinit_completion(&mas->tx_reset_done);
> writel(1, se->base + SE_DMA_TX_FSM_RST);
> @@ -182,9 +180,8 @@ static void handle_se_timeout(struct spi_master *spi,
> time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
> if (!time_left)
> dev_err(mas->dev, "DMA TX RESET failed\n");
> - geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
> }
> - if (xfer->rx_buf && mas->rx_se_dma) {
> + if (xfer->rx_buf) {
> spin_lock_irq(&mas->lock);
> reinit_completion(&mas->rx_reset_done);
> writel(1, se->base + SE_DMA_RX_FSM_RST);
> @@ -192,7 +189,6 @@ static void handle_se_timeout(struct spi_master *spi,
> time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
> if (!time_left)
> dev_err(mas->dev, "DMA RX RESET failed\n");
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> }
> } else {
> /*
> @@ -523,17 +519,36 @@ static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas
> return 1;
> }
>
> +static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
> + struct spi_geni_master *mas)
> +{
> + u32 len;
> +
> + if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
> + len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
> + else
> + len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
> + len &= TRANS_LEN_MSK;
> +
> + return len;
> +}
> +
> static bool geni_can_dma(struct spi_controller *ctlr,
> struct spi_device *slv, struct spi_transfer *xfer)
> {
> struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
> + u32 len, fifo_size;
>
> - /*
> - * Return true if transfer needs to be mapped prior to
> - * calling transfer_one which is the case only for GPI_DMA.
> - * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
> - */
> - return mas->cur_xfer_mode == GENI_GPI_DMA;
> + if (mas->cur_xfer_mode == GENI_GPI_DMA)
> + return true;
> +
> + len = get_xfer_len_in_words(xfer, mas);
> + fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
> +
> + if (len > fifo_size)
> + return true;
> + else
> + return false;
> }
>
> static int spi_geni_prepare_message(struct spi_master *spi,
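
A quick worked example of the new geni_can_dma() decision, with purely
illustrative numbers: with tx_fifo_depth = 16, fifo_width_bits = 32 and
cur_bits_per_word = 8, fifo_size = 16 * 32 / 8 = 64 words. A 200-byte
transfer gives len = 200 * 8 / 8 = 200 words, which exceeds 64, so the
core maps the buffers and the transfer later runs in SE DMA mode; a
32-byte transfer (32 words) stays on the FIFO path.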
> @@ -772,7 +787,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> u16 mode, struct spi_master *spi)
> {
> u32 m_cmd = 0;
> - u32 len, fifo_size;
> + u32 len;
> struct geni_se *se = &mas->se;
> int ret;
>
> @@ -804,11 +819,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> mas->tx_rem_bytes = 0;
> mas->rx_rem_bytes = 0;
>
> - if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
> - len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
> - else
> - len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
> - len &= TRANS_LEN_MSK;
> + len = get_xfer_len_in_words(xfer, mas);
>
> mas->cur_xfer = xfer;
> if (xfer->tx_buf) {
> @@ -823,9 +834,20 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> mas->rx_rem_bytes = xfer->len;
> }
>
> - /* Select transfer mode based on transfer length */
> - fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
> - mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
> + /*
> + * Select DMA mode if an sgt is present, and it has only 1 entry.
> + * This is not a serious limitation because the xfer buffers are
> + * expected to fit into 1 entry almost always, and if any
> + * doesn't for any reason, we fall back to FIFO mode anyway
> + */
> + if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
> + mas->cur_xfer_mode = GENI_SE_FIFO;
> + else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
> + dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
> + xfer->tx_sg.nents, xfer->rx_sg.nents);
> + mas->cur_xfer_mode = GENI_SE_FIFO;
> + } else
> + mas->cur_xfer_mode = GENI_SE_DMA;
> geni_se_select_mode(se, mas->cur_xfer_mode);
>
> /*
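
The 1-entry restriction above follows from the programming model visible in
the next hunk: geni_se_tx_init_dma()/geni_se_rx_init_dma() take exactly one
DMA address/length pair per direction, so a multi-entry sg table cannot be
handed to SE DMA in one shot, and falling back to FIFO (with the one-time
warning) is the simplest safe choice.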
> @@ -836,35 +858,17 @@ static int setup_se_xfer(struct spi_transfer *xfer,
> geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
>
> if (mas->cur_xfer_mode == GENI_SE_DMA) {
> - if (m_cmd & SPI_RX_ONLY) {
> - ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
> - xfer->len, &mas->rx_se_dma);
> - if (ret) {
> - dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
> - mas->rx_se_dma = 0;
> - goto unlock_and_return;
> - }
> - }
> - if (m_cmd & SPI_TX_ONLY) {
> - ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
> - xfer->len, &mas->tx_se_dma);
> - if (ret) {
> - dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
> - mas->tx_se_dma = 0;
> - if (m_cmd & SPI_RX_ONLY) {
> - /* Unmap rx buffer if duplex transfer */
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> - mas->rx_se_dma = 0;
> - }
> - goto unlock_and_return;
> - }
> - }
> + if (m_cmd & SPI_RX_ONLY)
> + geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
> + sg_dma_len(xfer->rx_sg.sgl));
> + if (m_cmd & SPI_TX_ONLY)
> + geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
> + sg_dma_len(xfer->tx_sg.sgl));
> } else if (m_cmd & SPI_TX_ONLY) {
> if (geni_spi_handle_tx(mas))
> writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
> }
>
> -unlock_and_return:
> spin_unlock_irq(&mas->lock);
> return ret;
> }
> @@ -965,14 +969,6 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
> if (dma_rx_status & RX_RESET_DONE)
> complete(&mas->rx_reset_done);
> if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
> - if (xfer->tx_buf && mas->tx_se_dma) {
> - geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
> - mas->tx_se_dma = 0;
> - }
> - if (xfer->rx_buf && mas->rx_se_dma) {
> - geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
> - mas->rx_se_dma = 0;
> - }
> spi_finalize_current_transfer(spi);
> mas->cur_xfer = NULL;
> }
> @@ -1057,6 +1053,7 @@ static int spi_geni_probe(struct platform_device *pdev)
> spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
> spi->num_chipselect = 4;
> spi->max_speed_hz = 50000000;
> + spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
> spi->prepare_message = spi_geni_prepare_message;
> spi->transfer_one = spi_geni_transfer_one;
> spi->can_dma = geni_can_dma;
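
One note on the new max_dma_len, hedged since it concerns core behaviour
rather than this driver: spi_map_buf() in drivers/spi/spi.c uses
ctlr->max_dma_len (together with the device's DMA segment limit) as a cap
on how large each scatterlist entry may get, so setting it here keeps a
single sg entry within what the SE DMA length registers can express.
Roughly (illustrative fragment, not the actual core code; dma_dev stands
for whichever device the core maps against):

    /* per-entry cap the core applies when building the sg table */
    desc_len = min_t(size_t, dma_get_max_seg_size(dma_dev), ctlr->max_dma_len);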