Commit 79434f9b authored by Frank Li's avatar Frank Li Committed by Vinod Koul
Browse files

dmaengine: fsl-edma: move common IRQ handler to common.c



Move the common part of IRQ handler from fsl-edma-main.c and
mcf-edma-main.c to fsl-edma-common.c. This eliminates redundant code, as
both files contain mostly identical code.

Signed-off-by: Frank Li <Frank.Li@nxp.com>
Link: https://lore.kernel.org/r/20230821161617.2142561-6-Frank.Li@nxp.com


Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent c26e6114
Loading
Loading
Loading
Loading
+26 −0
Original line number Diff line number Diff line
@@ -42,6 +42,32 @@

#define EDMA_TCD		0x1000

/*
 * Common per-channel transfer-complete handling, shared by the fsl-edma
 * and mcf-edma TX interrupt handlers (called once for each channel whose
 * interrupt bit was set, after the caller has cleared that bit).
 */
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		/*
		 * One-shot transfer finished: unlink it, complete its cookie,
		 * and mark the channel done/idle. edesc is cleared so the
		 * check below can start the next queued descriptor.
		 */
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		/* Cyclic transfer keeps running; just deliver the period callback. */
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	/* No active descriptor left — kick off the next pending one, if any. */
	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
+7 −0
Original line number Diff line number Diff line
@@ -219,6 +219,13 @@ static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

/*
 * Common per-channel error handling, shared by the fsl-edma and mcf-edma
 * error interrupt handlers: flag the channel as errored and idle. Callers
 * disable the channel's DMA request and clear the hardware error status
 * before invoking this.
 */
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
	fsl_chan->idle = true;
}

void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
			unsigned int slot, bool enable);
+2 −28
Original line number Diff line number Diff line
@@ -33,7 +33,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
@@ -42,31 +41,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);

			if (!fsl_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&fsl_chan->vchan.lock);
				continue;
			}

			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
@@ -86,8 +61,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
+2 −28
Original line number Diff line number Diff line
@@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	intmap = ioread32(regs->inth);
@@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
			fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
		}
	}

@@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
			fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
		}
	}