Re: [PATCH 5/7] dmaengine: sh: rz-dmac: Add suspend to RAM support
From: Tommaso Merciai
Date: Thu Mar 12 2026 - 08:29:55 EST
Hi Claudiu,
Thanks for your patch.
On Mon, Jan 26, 2026 at 12:31:53PM +0200, Claudiu wrote:
> From: Claudiu Beznea <claudiu.beznea.uj@xxxxxxxxxxxxxx>
>
> The Renesas RZ/G3S SoC supports a power saving mode in which power to most
> SoC components is turned off, including the DMA IP. Add suspend to RAM
> support to save and restore the DMA IP registers.
>
> Cyclic DMA channels require special handling. Since they can be paused and
> resumed during system suspend and resume, the driver restores additional
> registers for these channels during the resume phase. If a channel was not
> explicitly paused during suspend, the driver ensures that it is paused and
> resumed as part of the system suspend/resume flow. This might be the
> case of a serial device being used with no_console_suspend.
>
> For non-cyclic channels, the dev_pm_ops::prepare callback waits for all
> ongoing transfers to complete before allowing suspend-to-RAM to proceed.
>
> Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@xxxxxxxxxxxxxx>
> ---
> drivers/dma/sh/rz-dmac.c | 183 +++++++++++++++++++++++++++++++++++++--
> 1 file changed, 175 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
> index ab5f49a0b9f2..8f3e2719e639 100644
> --- a/drivers/dma/sh/rz-dmac.c
> +++ b/drivers/dma/sh/rz-dmac.c
> @@ -69,11 +69,15 @@ struct rz_dmac_desc {
> * enum rz_dmac_chan_status: RZ DMAC channel status
> * @RZ_DMAC_CHAN_STATUS_ENABLED: Channel is enabled
> * @RZ_DMAC_CHAN_STATUS_PAUSED: Channel is paused though DMA engine callbacks
> + * @RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL: Channel is paused through driver internal logic
> + * @RZ_DMAC_CHAN_STATUS_SYS_SUSPENDED: Channel was prepared for system suspend
> * @RZ_DMAC_CHAN_STATUS_CYCLIC: Channel is cyclic
> */
> enum rz_dmac_chan_status {
> RZ_DMAC_CHAN_STATUS_ENABLED,
> RZ_DMAC_CHAN_STATUS_PAUSED,
> + RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL,
> + RZ_DMAC_CHAN_STATUS_SYS_SUSPENDED,
> RZ_DMAC_CHAN_STATUS_CYCLIC,
> };
>
> @@ -94,6 +98,10 @@ struct rz_dmac_chan {
> u32 chctrl;
> int mid_rid;
>
> + struct {
> + u32 nxla;
> + } pm_state;
> +
> struct list_head ld_free;
> struct list_head ld_queue;
> struct list_head ld_active;
> @@ -1002,10 +1010,17 @@ static int rz_dmac_device_pause(struct dma_chan *chan)
> return rz_dmac_device_pause_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED);
> }
>
> +static int rz_dmac_device_pause_internal(struct rz_dmac_chan *channel)
> +{
> + lockdep_assert_held(&channel->vc.lock);
> +
> + return rz_dmac_device_pause_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL);
> +}
> +
> static int rz_dmac_device_resume_set(struct rz_dmac_chan *channel,
> enum rz_dmac_chan_status status)
> {
> - u32 val;
> + u32 val, chctrl;
> int ret;
>
> lockdep_assert_held(&channel->vc.lock);
> @@ -1013,14 +1028,33 @@ static int rz_dmac_device_resume_set(struct rz_dmac_chan *channel,
> if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED)))
> return 0;
>
> - rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
> - ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
> - !(val & CHSTAT_SUS), 1, 1024, false,
> - channel, CHSTAT, 1);
> - if (ret)
> - return ret;
> + if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_SYS_SUSPENDED)) {
> + /*
> + * We can be after a sleep state with power loss. If power was
> + * lost, the CHSTAT_SUS bit is zero. In this case, we need to
> + * enable the channel directly. Otherwise, just set the CLRSUS
> + * bit.
> + */
> + val = rz_dmac_ch_readl(channel, CHSTAT, 1);
> + if (val & CHSTAT_SUS)
> + chctrl = CHCTRL_CLRSUS;
> + else
> + chctrl = CHCTRL_SETEN;
> + } else {
> + chctrl = CHCTRL_CLRSUS;
> + }
> +
> + rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
>
> - channel->status &= ~BIT(status);
> + if (chctrl & CHCTRL_CLRSUS) {
> + ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
> + !(val & CHSTAT_SUS), 1, 1024, false,
> + channel, CHSTAT, 1);
> + if (ret)
> + return ret;
> + }
> +
> + channel->status &= ~(BIT(status) | BIT(RZ_DMAC_CHAN_STATUS_SYS_SUSPENDED));
>
> return 0;
> }
> @@ -1034,6 +1068,13 @@ static int rz_dmac_device_resume(struct dma_chan *chan)
> return rz_dmac_device_resume_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED);
> }
>
> +static int rz_dmac_device_resume_internal(struct rz_dmac_chan *channel)
> +{
> + lockdep_assert_held(&channel->vc.lock);
> +
> + return rz_dmac_device_resume_set(channel, RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL);
> +}
> +
> /*
> * -----------------------------------------------------------------------------
> * IRQ handling
> @@ -1438,6 +1479,131 @@ static void rz_dmac_remove(struct platform_device *pdev)
> pm_runtime_disable(&pdev->dev);
> }
>
> +static int rz_dmac_suspend_prepare(struct device *dev)
> +{
> + struct rz_dmac *dmac = dev_get_drvdata(dev);
> +
> + for (unsigned int i = 0; i < dmac->n_channels; i++) {
> + struct rz_dmac_chan *channel = &dmac->channels[i];
> +
> + guard(spinlock_irqsave)(&channel->vc.lock);
> +
> + /* Wait for transfer completion, except in cyclic case. */
> + if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_ENABLED) &&
> + !(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)))
> + return -EAGAIN;
> + }
> +
> + return 0;
> +}
> +
> +static void rz_dmac_suspend_recover(struct rz_dmac *dmac)
> +{
> + for (unsigned int i = 0; i < dmac->n_channels; i++) {
> + struct rz_dmac_chan *channel = &dmac->channels[i];
> +
> + guard(spinlock_irqsave)(&channel->vc.lock);
> +
> + if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)))
> + continue;
> +
> + if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL)))
> + continue;
> +
> + rz_dmac_device_resume_internal(channel);
> + }
> +}
> +
> +static int rz_dmac_suspend(struct device *dev)
> +{
> + struct rz_dmac *dmac = dev_get_drvdata(dev);
> + int ret;
> +
> + for (unsigned int i = 0; i < dmac->n_channels; i++) {
> + struct rz_dmac_chan *channel = &dmac->channels[i];
> +
> + guard(spinlock_irqsave)(&channel->vc.lock);
> +
> + if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)))
> + continue;
> +
> + if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED))) {
> + ret = rz_dmac_device_pause_internal(channel);
> + if (ret) {
> + dev_err(dev, "Failed to suspend channel %s\n",
> + dma_chan_name(&channel->vc.chan));
> + continue;
> + }
> + }
> +
> + channel->pm_state.nxla = rz_dmac_ch_readl(channel, NXLA, 1);
> + channel->status |= BIT(RZ_DMAC_CHAN_STATUS_SYS_SUSPENDED);
> + }
> +
> + pm_runtime_put_sync(dmac->dev);
> +
> + ret = reset_control_assert(dmac->rstc);
> + if (ret) {
> + pm_runtime_resume_and_get(dmac->dev);
> + rz_dmac_suspend_recover(dmac);
> + }
> +
> + return ret;
> +}
Testing suspend/resume support on RZ/G3E (DMAC + RSPI) I'm seeing the
following when suspending:
rz_dmac_suspend()
[ 50.657802] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.667577] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.675984] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.684394] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.692804] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.701221] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.709642] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.718062] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.726480] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
[ 50.734900] rz-dmac 11400000.dma-controller: PM: device_prepare(): genpd_prepare returns -11
I found out that this issue can be solved by:
When the IRQ handler thread completes a non-cyclic transfer and there
are no more descriptors queued (ld_queue is empty), invalidate all
link-mode descriptor headers and clear the RZ_DMAC_CHAN_STATUS_ENABLED
bit in rz_dmac_irq_handler_thread():
static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
struct rz_dmac_chan *channel = dev_id;
struct rz_dmac_desc *desc = NULL;
unsigned long flags;
spin_lock_irqsave(&channel->vc.lock, flags);
if (list_empty(&channel->ld_active)) {
/* Someone might have called terminate all */
goto out;
}
desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)) {
desc = channel->desc;
vchan_cyclic_callback(&desc->vd);
goto out;
} else {
vchan_cookie_complete(&desc->vd);
}
list_move_tail(channel->ld_active.next, &channel->ld_free);
if (!list_empty(&channel->ld_queue)) {
desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
node);
channel->desc = desc;
if (rz_dmac_xfer_desc(channel) == 0)
list_move_tail(channel->ld_queue.next, &channel->ld_active);
+ } else {
+ rz_dmac_invalidate_lmdesc(channel);
+ channel->status &= ~BIT(RZ_DMAC_CHAN_STATUS_ENABLED);
}
out:
spin_unlock_irqrestore(&channel->vc.lock, flags);
return IRQ_HANDLED;
}
> +
> +static int rz_dmac_resume(struct device *dev)
> +{
> + struct rz_dmac *dmac = dev_get_drvdata(dev);
> + int ret;
> +
> + ret = reset_control_deassert(dmac->rstc);
> + if (ret)
> + return ret;
> +
> + ret = pm_runtime_resume_and_get(dmac->dev);
> + if (ret) {
> + reset_control_assert(dmac->rstc);
> + return ret;
> + }
> +
> + rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
> + rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
> +
> + for (unsigned int i = 0; i < dmac->n_channels; i++) {
> + struct rz_dmac_chan *channel = &dmac->channels[i];
> +
> + guard(spinlock_irqsave)(&channel->vc.lock);
> +
> + rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
> +
> + if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC))) {
> + rz_dmac_ch_writel(&dmac->channels[i], CHCTRL_DEFAULT, CHCTRL, 1);
> + continue;
> + }
> +
> + rz_dmac_ch_writel(channel, channel->pm_state.nxla, NXLA, 1);
> + rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
> + rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
> + rz_dmac_ch_writel(channel, channel->chctrl, CHCTRL, 1);
> +
> + if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED_INTERNAL)) {
> + ret = rz_dmac_device_resume_internal(channel);
> + if (ret) {
> + dev_err(dev, "Failed to resume channel %s\n",
> + dma_chan_name(&channel->vc.chan));
> + continue;
> + }
> + }
> + }
> +
> + return 0;
> +}
Then on resume I'm seeing the following when testing DMAC + RSPI:
[ 52.831840] spi-nor spi0.0: SPI transfer failed: -110
[ 52.836950] spi_master spi0: failed to transfer one message from queue
[ 52.843474] spi_master spi0: noqueue transfer failed
I found out that this can be solved by moving:
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
after the cyclic check:
if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC))) {
rz_dmac_ch_writel(&dmac->channels[i], CHCTRL_DEFAULT, CHCTRL, 1);
continue;
}
+ rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
rz_dmac_ch_writel(channel, channel->pm_state.nxla, NXLA, 1);
rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
In this way, rz_dmac_set_dma_req_no() is only called for cyclic
channels.
What do you think?
Kind Regards,
Tommaso
> +
> +static const struct dev_pm_ops rz_dmac_pm_ops = {
> + .prepare = rz_dmac_suspend_prepare,
> + SYSTEM_SLEEP_PM_OPS(rz_dmac_suspend, rz_dmac_resume)
> +};
> +
> static const struct rz_dmac_info rz_dmac_v2h_info = {
> .icu_register_dma_req = rzv2h_icu_register_dma_req,
> .default_dma_req_no = RZV2H_ICU_DMAC_REQ_NO_DEFAULT,
> @@ -1464,6 +1630,7 @@ static struct platform_driver rz_dmac_driver = {
> .driver = {
> .name = "rz-dmac",
> .of_match_table = of_rz_dmac_match,
> + .pm = pm_sleep_ptr(&rz_dmac_pm_ops),
> },
> .probe = rz_dmac_probe,
> .remove = rz_dmac_remove,
> --
> 2.43.0
>