[PATCH v1 05/12] dmaengine: fsl-edma: move common IRQ handler to common.c
From: Frank Li
Date: Fri May 26 2023 - 10:39:02 EST
Move the common part of the IRQ handlers from fsl-edma-main.c and
mcf-edma-main.c to fsl-edma-common.c. This eliminates redundant code,
as both files contain mostly identical handler logic.
Signed-off-by: Frank Li <Frank.Li@xxxxxxx>
---
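Note (below the fold, not part of the commit message): after this
change a per-controller tx IRQ handler only acknowledges the interrupt
and dispatches to the common helper, which does the locked descriptor
completion. A minimal sketch of the expected caller shape; the
"foo_edma" handler name is a placeholder, while the accessors and
fields are the ones already used by fsl-edma-main.c:

	/* Sketch only: "foo_edma" is a hypothetical controller name */
	static irqreturn_t foo_edma_tx_handler(int irq, void *dev_id)
	{
		struct fsl_edma_engine *edma = dev_id;
		unsigned int intr, ch;

		/* read the pending-interrupt bitmap */
		intr = edma_readl(edma, edma->regs.intl);
		if (!intr)
			return IRQ_NONE;

		for (ch = 0; ch < edma->n_chans; ch++) {
			if (intr & BIT(ch)) {
				/* ack the channel interrupt ... */
				edma_writeb(edma, EDMA_CINT_CINT(ch),
					    edma->regs.cint);
				/* ... then complete/advance its descriptor */
				fsl_edma_tx_chan_handler(&edma->chans[ch]);
			}
		}

		return IRQ_HANDLED;
	}

The error path is analogous: the driver disables the channel's hardware
request and clears the error flag itself, then calls
fsl_edma_err_chan_handler() to mark the channel DMA_ERROR and idle.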
drivers/dma/fsl-edma-common.c | 26 ++++++++++++++++++++++++++
drivers/dma/fsl-edma-common.h | 7 +++++++
drivers/dma/fsl-edma-main.c | 30 ++----------------------------
drivers/dma/mcf-edma-main.c | 30 ++----------------------------
4 files changed, 37 insertions(+), 56 deletions(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index e5c7497c1ff3..839471c42758 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -42,6 +42,32 @@
 
 #define EDMA_TCD		0x1000
 
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+	spin_lock(&fsl_chan->vchan.lock);
+
+	if (!fsl_chan->edesc) {
+		/* terminate_all called before */
+		spin_unlock(&fsl_chan->vchan.lock);
+		return;
+	}
+
+	if (!fsl_chan->edesc->iscyclic) {
+		list_del(&fsl_chan->edesc->vdesc.node);
+		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+		fsl_chan->edesc = NULL;
+		fsl_chan->status = DMA_COMPLETE;
+		fsl_chan->idle = true;
+	} else {
+		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+	}
+
+	if (!fsl_chan->edesc)
+		fsl_edma_xfer_desc(fsl_chan);
+
+	spin_unlock(&fsl_chan->vchan.lock);
+}
+
 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 {
 	struct edma_regs *regs = &fsl_chan->edma->regs;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 2f13e687a721..71e19e20f1cb 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -223,6 +223,13 @@ static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
 	return container_of(vd, struct fsl_edma_desc, vdesc);
 }
 
+static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+	fsl_chan->status = DMA_ERROR;
+	fsl_chan->idle = true;
+}
+
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 			unsigned int slot, bool enable);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 31531b8bde78..9c0b6fb4cb8f 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -33,7 +33,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
 	struct fsl_edma_engine *fsl_edma = dev_id;
 	unsigned int intr, ch;
 	struct edma_regs *regs = &fsl_edma->regs;
-	struct fsl_edma_chan *fsl_chan;
 
 	intr = edma_readl(fsl_edma, regs->intl);
 	if (!intr)
@@ -42,31 +41,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
 		if (intr & (0x1 << ch)) {
 			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
-
-			fsl_chan = &fsl_edma->chans[ch];
-
-			spin_lock(&fsl_chan->vchan.lock);
-
-			if (!fsl_chan->edesc) {
-				/* terminate_all called before */
-				spin_unlock(&fsl_chan->vchan.lock);
-				continue;
-			}
-
-			if (!fsl_chan->edesc->iscyclic) {
-				list_del(&fsl_chan->edesc->vdesc.node);
-				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
-				fsl_chan->edesc = NULL;
-				fsl_chan->status = DMA_COMPLETE;
-				fsl_chan->idle = true;
-			} else {
-				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
-			}
-
-			if (!fsl_chan->edesc)
-				fsl_edma_xfer_desc(fsl_chan);
-
-			spin_unlock(&fsl_chan->vchan.lock);
+			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
 		}
 	}
 	return IRQ_HANDLED;
@@ -86,8 +61,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
 		if (err & (0x1 << ch)) {
 			fsl_edma_disable_request(&fsl_edma->chans[ch]);
 			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
-			fsl_edma->chans[ch].status = DMA_ERROR;
-			fsl_edma->chans[ch].idle = true;
+			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
 		}
 	}
 	return IRQ_HANDLED;
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index ebd8733f72ad..af1b0e8a3021 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
 	struct fsl_edma_engine *mcf_edma = dev_id;
 	struct edma_regs *regs = &mcf_edma->regs;
 	unsigned int ch;
-	struct fsl_edma_chan *mcf_chan;
 	u64 intmap;
 
 	intmap = ioread32(regs->inth);
@@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
 	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
 		if (intmap & BIT(ch)) {
 			iowrite8(EDMA_MASK_CH(ch), regs->cint);
-
-			mcf_chan = &mcf_edma->chans[ch];
-
-			spin_lock(&mcf_chan->vchan.lock);
-
-			if (!mcf_chan->edesc) {
-				/* terminate_all called before */
-				spin_unlock(&mcf_chan->vchan.lock);
-				continue;
-			}
-
-			if (!mcf_chan->edesc->iscyclic) {
-				list_del(&mcf_chan->edesc->vdesc.node);
-				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
-				mcf_chan->edesc = NULL;
-				mcf_chan->status = DMA_COMPLETE;
-				mcf_chan->idle = true;
-			} else {
-				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
-			}
-
-			if (!mcf_chan->edesc)
-				fsl_edma_xfer_desc(mcf_chan);
-
-			spin_unlock(&mcf_chan->vchan.lock);
+			fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
 		}
 	}
 
@@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
 		if (err & BIT(ch)) {
 			fsl_edma_disable_request(&mcf_edma->chans[ch]);
 			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
-			mcf_edma->chans[ch].status = DMA_ERROR;
-			mcf_edma->chans[ch].idle = true;
+			fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
 		}
 	}
 
--
2.34.1