[PATCH v2 2/7] dmaengine: sh: rz-dmac: Add pause status bit
From: Claudiu
Date: Fri Mar 20 2026 - 07:30:04 EST
From: Claudiu Beznea <claudiu.beznea.uj@xxxxxxxxxxxxxx>
Add the RZ_DMAC_CHAN_STATUS_PAUSED status bit index. This is needed to
implement suspend to RAM support for cyclic DMA channels, which will be
added in subsequent commits.
The pause and resume implementations are updated to be reused by the code
that will be added for suspend to RAM handling. Since the pause state is
now stored in a per-channel software cache, there is no longer a need to
interrogate the hardware registers in the pause path. Using the software
status cache simplifies the implementation. The resume code was updated to
use the software status cache as well.
This is a preparatory commit for cyclic DMA suspend to RAM support.
Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@xxxxxxxxxxxxxx>
---
Changes in v2:
- fixed typos in patch description
drivers/dma/sh/rz-dmac.c | 68 ++++++++++++++++++++++++++++++----------
1 file changed, 52 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 8148a1c78e12..32349d214f68 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -18,6 +18,7 @@
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqchip/irq-renesas-rzt2h.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
@@ -65,9 +66,11 @@ struct rz_dmac_desc {
/**
* enum rz_dmac_chan_status: RZ DMAC channel status
* @RZ_DMAC_CHAN_STATUS_ENABLED: Channel is enabled
* @RZ_DMAC_CHAN_STATUS_PAUSED: Channel is paused through DMA engine callbacks
*/
enum rz_dmac_chan_status {
RZ_DMAC_CHAN_STATUS_ENABLED,
+ RZ_DMAC_CHAN_STATUS_PAUSED,
};
struct rz_dmac_chan {
@@ -825,12 +828,9 @@ static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
return status;
scoped_guard(spinlock_irqsave, &channel->vc.lock) {
- u32 val;
-
residue = rz_dmac_chan_get_residue(channel, cookie);
- val = rz_dmac_ch_readl(channel, CHSTAT, 1);
- if (val & CHSTAT_SUS)
+ if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED))
status = DMA_PAUSED;
}
@@ -843,35 +843,71 @@ static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
return status;
}
-static int rz_dmac_device_pause(struct dma_chan *chan)
+static int rz_dmac_device_pause_set(struct rz_dmac_chan *channel,
+ enum rz_dmac_chan_status status)
{
- struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
+ int ret;
- guard(spinlock_irqsave)(&channel->vc.lock);
+ lockdep_assert_held(&channel->vc.lock);
if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_ENABLED)))
return 0;
rz_dmac_ch_writel(channel, CHCTRL_SETSUS, CHCTRL, 1);
- return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
- (val & CHSTAT_SUS), 1, 1024,
- false, channel, CHSTAT, 1);
+ ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+ (val & CHSTAT_SUS), 1, 1024, false,
+ channel, CHSTAT, 1);
+ if (ret)
+ return ret;
+
+ channel->status |= BIT(status);
+
+ return 0;
}
-static int rz_dmac_device_resume(struct dma_chan *chan)
+static int rz_dmac_device_pause(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
- u32 val;
guard(spinlock_irqsave)(&channel->vc.lock);
- /* Do not check CHSTAT_SUS but rely on HW capabilities. */
+ if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED))
+ return 0;
+
+ return rz_dmac_device_pause_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED);
+}
+
+static int rz_dmac_device_resume_set(struct rz_dmac_chan *channel,
+ enum rz_dmac_chan_status status)
+{
+ u32 val;
+ int ret;
+
+ lockdep_assert_held(&channel->vc.lock);
+
+ if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED)))
+ return 0;
rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
- return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
- !(val & CHSTAT_SUS), 1, 1024,
- false, channel, CHSTAT, 1);
+ ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+ !(val & CHSTAT_SUS), 1, 1024, false,
+ channel, CHSTAT, 1);
+ if (ret)
+ return ret;
+
+ channel->status &= ~BIT(status);
+
+ return 0;
+}
+
+static int rz_dmac_device_resume(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ guard(spinlock_irqsave)(&channel->vc.lock);
+
+ return rz_dmac_device_resume_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED);
}
/*
--
2.43.0