Add a GPC DMA controller driver for Tegra186 and Tegra194. The driver
supports DMA transfers from memory to memory, from I/O peripheral to
memory, and from memory to I/O peripheral.
Signed-off-by: Pavan Kunapuli <pkunapuli@xxxxxxxxxx>
Signed-off-by: Rajesh Gumasta <rgumasta@xxxxxxxxxx>
Signed-off-by: Akhil R <akhilrajeev@xxxxxxxxxx>
---
drivers/dma/Kconfig | 12 +
drivers/dma/Makefile | 1 +
drivers/dma/tegra186-gpc-dma.c | 1375 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 1388 insertions(+)
create mode 100644 drivers/dma/tegra186-gpc-dma.c
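
For context, a minimal sketch of a dmaengine client driving a slave
transfer through this controller (illustrative only: the "rx" channel
name, FIFO address, buffer and callback are placeholders, and error
handling is omitted):

	struct dma_chan *chan;
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_chan(dev, "rx");

	cfg.src_addr = periph_fifo_phys;	/* peripheral FIFO address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 8;
	dmaengine_slave_config(chan, &cfg);

	/* Buffer address and length must be word (4-byte) aligned */
	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	desc->callback = done_fn;
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);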
+static int tegra_dma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ if (tdc->dma_desc) {
+ dev_err(tdc2dev(tdc), "Configuration not allowed\n");
+ return -EBUSY;
+ }
+
+ memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ if (tdc->slave_id == -1)
+ tdc->slave_id = sconfig->slave_id;
+
+ tdc->config_init = true;
+ return 0;
+}
+
+static int tegra_dma_pause(struct tegra_dma_channel *tdc)
+{
+ u32 val;
+ int ret;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, TEGRA_GPCDMA_CHAN_CSRE_PAUSE);
+
+ /* Wait until busy bit is de-asserted */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
+ val,
+ !(val & TEGRA_GPCDMA_STATUS_BUSY),
+ TEGRA_GPCDMA_BURST_COMPLETE_TIME,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+
+ if (ret)
+ dev_err(tdc2dev(tdc), "DMA pause timed out\n");
+
+ return ret;
+}
+
+static void tegra_dma_stop(struct tegra_dma_channel *tdc)
+{
+ u32 csr, status;
+
+ csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+
+ /* Disable interrupts */
+ csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;
+
+ /* Disable DMA */
+ csr &= ~TEGRA_GPCDMA_CSR_ENB;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+
+ /* Clear interrupt status if it is there */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
+ }
+ tdc->busy = false;
+}
+
+static void tegra_dma_start(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_channel_regs *ch_regs;
+ struct virt_dma_desc *vdesc;
+
+ if (tdc->busy)
+ return;
+
+ vdesc = vchan_next_desc(&tdc->vc);
+ if (!vdesc)
+ return;
+
+ tdc->dma_desc = vd_to_tegra_dma_desc(vdesc);
+ if (!tdc->dma_desc)
+ return;
+
+ list_del(&vdesc->node);
+
+ tdc->dma_desc->tdc = tdc;
+ ch_regs = &tdc->dma_desc->ch_regs;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, 0);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
+
+ tdc->busy = true;
+}
+
+static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ unsigned int sid = tdc->slave_id;
+
+ switch (tdc->sid_dir) {
+ case DMA_MEM_TO_DEV:
+ clear_bit(sid, &tdma->sid_m2d_reserved);
+ break;
+ case DMA_DEV_TO_MEM:
+ clear_bit(sid, &tdma->sid_d2m_reserved);
+ break;
+ case DMA_MEM_TO_MEM:
+ clear_bit(sid, &tdma->sid_m2m_reserved);
+ break;
+ }
+
+ tdc->sid_reserved = false;
+ tdc->sid_dir = DMA_TRANS_NONE;
+}
+
+static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
+{
+ tegra_dma_sid_free(tdc);
+ kfree(tdc->dma_desc);
+ tdc->dma_desc = NULL;
+}
+
+static void tegra_dma_isr_callback(struct tegra_dma_channel *tdc,
+ bool to_terminate)
+{
+ struct tegra_dma_desc *dma_desc;
+
+ tdc->busy = false;
+ dma_desc = tdc->dma_desc;
+ dma_desc->bytes_transferred += dma_desc->bytes_requested;
+
+ vchan_cookie_complete(&dma_desc->vd);
+ /* The vchan now owns the completed descriptor; drop our reference */
+ tdc->dma_desc = NULL;
+
+ if (to_terminate)
+ return;
+
+ tegra_dma_start(tdc);
+}
+
+static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
+ unsigned int err_status)
+{
+ switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
+ case TEGRA_DMA_BM_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d bm fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d peripheral fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal peripheral id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_STREAM_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal stream id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MC_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mc slave error\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MMIO_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mmio slave error\n", tdc->id);
+ break;
+
+ default:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d security violation %x\n", tdc->id,
+ err_status);
+ }
+}
+
+static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
+{
+ struct tegra_dma_channel *tdc = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int err_status;
+ unsigned long status;
+
+ raw_spin_lock(&tdc->lock);
+
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ err_status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
+
+ if (err_status) {
+ tegra_dma_chan_decode_error(tdc, err_status);
+ tegra_dma_dump_chan_regs(tdc);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
+ }
+
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
+ TEGRA_GPCDMA_STATUS_ISE_EOC);
+ tegra_dma_isr_callback(tdc, false);
+
+ ret = IRQ_HANDLED;
+ }
+
+ raw_spin_unlock(&tdc->lock);
+ return ret;
+}
+
+static void tegra_dma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&tdc->lock, flags);
+
+ if (!tdc->busy)
+ if (vchan_issue_pending(&tdc->vc))
+ tegra_dma_start(tdc);
+
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static void tegra_dma_reset_client(struct tegra_dma_channel *tdc)
+{
+ u32 csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+
+ csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
+ csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+}
+
+static void tegra_dma_stop_client(struct tegra_dma_channel *tdc)
+{
+ int ret;
+ unsigned long status;
+
+ /*
+ * Before reading the DMA status to figure out the number of
+ * bytes transferred by the DMA channel, change the client
+ * associated with it. This stops the DMA engine from starting
+ * any more bursts for the given client and lets the in-flight
+ * bursts complete.
+ */
+ tegra_dma_reset_client(tdc);
+
+ /* Wait for in flight data transfer to finish */
+ udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);
+
+ /*
+ * If the TX/RX path is still active, wait until it
+ * becomes inactive.
+ */
+
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset +
+ TEGRA_GPCDMA_CHAN_STATUS,
+ status,
+ !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
+ TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
+ 5,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
+ tegra_dma_dump_chan_regs(tdc);
+ }
+}
+
+static int tegra_dma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long wcount = 0;
+ unsigned long status;
+ unsigned long flags;
+ bool was_busy;
+ int err;
+
+ raw_spin_lock_irqsave(&tdc->lock, flags);
+
+ if (!tdc->dma_desc) {
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+ return 0;
+ }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+
+ if (tdc->tdma->chip_data->hw_support_pause) {
+ err = tegra_dma_pause(tdc);
+ if (err) {
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+ return err;
+ }
+ } else {
+ tegra_dma_stop_client(tdc);
+ }
+
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
+ tegra_dma_isr_callback(tdc, true);
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
+ }
+
+ was_busy = tdc->busy;
+
+ tegra_dma_stop(tdc);
+ if (tdc->dma_desc && was_busy)
+ tdc->dma_desc->bytes_transferred +=
+ tdc->dma_desc->bytes_requested - (wcount * 4);
+
+skip_dma_stop:
+ tegra_dma_abort_all(tdc);
+ vchan_free_chan_resources(&tdc->vc);
+
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+ return 0;
+}
+
+static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc = NULL;
+ struct virt_dma_desc *vd;
+ unsigned int residual;
+ enum dma_status ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&tdc->lock, flags);
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_COMPLETE) {
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+
+ vd = vchan_find_desc(&tdc->vc, cookie);
+ if (vd)
+ dma_desc = vd_to_tegra_dma_desc(vd);
+
+ if (dma_desc) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ /* The descriptor is still in flight; it must not be freed here */
+ } else {
+ dev_err(tdc2dev(tdc), "cookie %d not found\n", cookie);
+ }
+
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+}
+
+static inline int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
+{
+ switch (slave_bw) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
+ default:
+ dev_err(tdc2dev(tdc), "given slave bw is not supported\n");
+ return -EINVAL;
+ }
+}
+
+static inline int get_burst_size_by_len(int len)
+{
+ int ret;
+
+ switch (len) {
+ case BIT(0) ... BIT(2):
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_1;
+ break;
+ case BIT(3):
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_2;
+ break;
+ case BIT(4):
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_4;
+ break;
+ case BIT(5):
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_8;
+ break;
+ default:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_16;
+ break;
+ }
+
+ return ret;
+}
+
+static inline int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size,
+ enum dma_slave_buswidth slave_bw,
+ int len)
+{
+ int burst_mmio_width, burst_byte, ret;
+
+ /*
+ * burst_size from the client is in units of the bus width;
+ * convert it into 4-byte words.
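+ * E.g. dst_maxburst = 8 on a 4-byte wide FIFO gives burst_byte = 32
+ * and burst_mmio_width = 8, which selects
+ * TEGRA_GPCDMA_MMIOSEQ_BURST_8 below.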
+ */
+ burst_byte = burst_size * slave_bw;
+ burst_mmio_width = burst_byte / 4;
+
+ switch (burst_mmio_width) {
+ case 0:
+ ret = get_burst_size_by_len(len);
+ break;
+ case 1:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_1;
+ break;
+ case 2 ... 3:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_2;
+ break;
+ case 4 ... 7:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_4;
+ break;
+ case 8 ... 15:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_8;
+ break;
+ default:
+ ret = TEGRA_GPCDMA_MMIOSEQ_BURST_16;
+ break;
+ }
+
+ return ret;
+}
+
+static int tegra_dma_reserve_sid(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ unsigned int sid = tdc->slave_id;
+
+ if (tdc->sid_reserved)
+ return tdc->sid_dir == direction ? 0 : -EINVAL;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ case DMA_DEV_TO_MEM:
+ if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ case DMA_MEM_TO_MEM:
+ if (test_and_set_bit(sid, &tdma->sid_m2m_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(tdma->dev, "unsupported DMA direction\n");
+ return -EINVAL;
+ }
+
+ tdc->sid_reserved = true;
+ tdc->sid_dir = direction;
+
+ return 0;
+}
+
+static int get_transfer_param(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction,
+ unsigned long *apb_addr,
+ unsigned long *mmio_seq,
+ unsigned long *csr,
+ unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
+{
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ *apb_addr = tdc->dma_sconfig.dst_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
+ *burst_size = tdc->dma_sconfig.dst_maxburst;
+ *slave_bw = tdc->dma_sconfig.dst_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
+ return 0;
+ case DMA_DEV_TO_MEM:
+ *apb_addr = tdc->dma_sconfig.src_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *slave_bw = tdc->dma_sconfig.src_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
+ return 0;
+ case DMA_MEM_TO_MEM:
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
+ return 0;
+ default:
+ dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
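+/*
+ * Clients reach this via the generic dmaengine wrapper, e.g.
+ * (illustrative sketch only):
+ * dmaengine_prep_dma_memset(chan, dst, value, len, DMA_PREP_INTERRUPT);
+ * Note that this driver requires dst and len to be word (4-byte)
+ * aligned.
+ */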
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ unsigned long csr, mc_seq;
+
+ /* Set DMA mode to fixed-pattern transfer */
+ csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
+ /* Select once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the dma interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream ID and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_requested = 0;
+ dma_desc->bytes_transferred = 0;
+
+ if ((len & 3) || (dest & 3) ||
+ len > tdc->tdma->chip_data->max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "Dma length/memory address is not supported\n");
+ kfree(dma_desc);
+ return NULL;
+ }
+
+ dma_desc->bytes_requested += len;
+ dma_desc->ch_regs.src_ptr = 0;
+ dma_desc->ch_regs.dst_ptr = dest;
+ dma_desc->ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ dma_desc->ch_regs.fixed_pattern = value;
+ /* The word count register takes the value as (N + 1) words */
+ dma_desc->ch_regs.wcount = ((len - 4) >> 2);
+ dma_desc->ch_regs.csr = csr;
+ dma_desc->ch_regs.mmio_seq = 0;
+ dma_desc->ch_regs.mc_seq = mc_seq;
+
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
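+/*
+ * Likewise for memory-to-memory copies, e.g. (illustrative sketch):
+ * dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
+ * src, dst and len must all be word (4-byte) aligned, as checked
+ * below.
+ */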
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ unsigned long csr, mc_seq;
+
+ /* Set DMA mode to memory-to-memory transfer */
+ csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
+ /* Select once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the dma interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream IDs and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK |
+ TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK;
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_requested = 0;
+ dma_desc->bytes_transferred = 0;
+
+ if ((len & 3) || (src & 3) || (dest & 3) ||
+ len > tdc->tdma->chip_data->max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "Dma length/memory address is not supported\n");
+ kfree(dma_desc);
+ return NULL;
+ }
+
+ dma_desc->bytes_requested += len;
+ dma_desc->ch_regs.src_ptr = src;
+ dma_desc->ch_regs.dst_ptr = dest;
+ dma_desc->ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
+ dma_desc->ch_regs.high_addr_ptr |=
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ /* The word count register takes the value as (N + 1) words */
+ dma_desc->ch_regs.wcount = ((len - 4) >> 2);
+ dma_desc->ch_regs.csr = csr;
+ dma_desc->ch_regs.mmio_seq = 0;
+ dma_desc->ch_regs.mc_seq = mc_seq;
+
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
+ enum dma_slave_buswidth slave_bw;
+ struct tegra_dma_desc *dma_desc;
+ struct scatterlist *sg;
+ u32 burst_size;
+ unsigned int i;
+ int ret;
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+ return NULL;
+ }
+ if (sg_len < 1) {
+ dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ ret = tegra_dma_reserve_sid(tdc, direction);
+ if (ret)
+ return NULL;
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ /* Select once (non-continuous) transfer mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Program the slave id in requestor select */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ /* Enable the dma interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream ID and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping on both MC and MMIO side */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
+
+ /* Program 2 MC outstanding requests by default. */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+
+ /* Setting MC burst size depending on MMIO burst size */
+ if (burst_size == 64)
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+ else
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
+
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_requested = 0;
+ dma_desc->bytes_transferred = 0;
+
+ /* Make transfer requests */
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 len;
+ dma_addr_t mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((len & 3) || (mem & 3) ||
+ len > tdc->tdma->chip_data->max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "Dma length/memory address is not supported\n");
+ kfree(dma_desc);
+ return NULL;
+ }
+
+ mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ dma_desc->bytes_requested += len;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ dma_desc->ch_regs.src_ptr = mem;
+ dma_desc->ch_regs.dst_ptr = apb_ptr;
+ dma_desc->ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
+ } else if (direction == DMA_DEV_TO_MEM) {
+ dma_desc->ch_regs.src_ptr = apb_ptr;
+ dma_desc->ch_regs.dst_ptr = mem;
+ dma_desc->ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
+ }
+
+ /*
+ * Word count register takes input in words. Writing a value
+ * of N into word count register means a req of (N+1) words.
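+ * E.g. a 64-byte request is programmed as wcount = 15 (16 words).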
+ */
+ dma_desc->ch_regs.wcount = ((len - 4) >> 2);
+ dma_desc->ch_regs.csr = csr;
+ dma_desc->ch_regs.mmio_seq = mmio_seq;
+ dma_desc->ch_regs.mc_seq = mc_seq;
+ }
+
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ dma_cookie_init(&tdc->vc.chan);
+ tdc->config_init = false;
+ return 0;
+}
+
+static void tegra_dma_chan_synchronize(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ vchan_synchronize(&tdc->vc);
+}
+
+static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+ if (tdc->busy)
+ tegra_dma_terminate_all(dc);
+
+ tegra_dma_chan_synchronize(dc);
+
+ tasklet_kill(&tdc->vc.task);
+ raw_spin_lock_irqsave(&tdc->lock, flags);
+ tdc->config_init = false;
+ tdc->slave_id = -1;
+ tdc->sid_reserved = false;
+ tdc->sid_dir = DMA_TRANS_NONE;
+
+ raw_spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_dma *tdma = ofdma->of_dma_data;
+ struct tegra_dma_channel *tdc;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_dma_chan(chan);
+ tdc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
+
+static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = false,
+};
+
+static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+};
+
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-gpcdma",
+ .data = &tegra186_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra194-gpcdma",
+ .data = &tegra194_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
+static int tegra_dma_program_sid(struct tegra_dma_channel *tdc,
+ int chan, int stream_id)
+{
+ unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
+
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
+ return 0;
+}
+
+static int tegra_dma_probe(struct platform_device *pdev)
+{
+ const struct tegra_dma_chip_data *cdata;
+ struct iommu_fwspec *iommu_spec;
+ unsigned int stream_id, i;
+ struct tegra_dma *tdma;
+ struct resource *res;
+ int ret;
+
+ cdata = of_device_get_match_data(&pdev->dev);
+
+ tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
+ sizeof(struct tegra_dma_channel), GFP_KERNEL);
+ if (!tdma)
+ return -ENOMEM;
+
+ tdma->dev = &pdev->dev;
+ tdma->chip_data = cdata;
+ platform_set_drvdata(pdev, tdma);
+
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+
+ tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
+ if (IS_ERR(tdma->rst)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
+ "Missing controller reset\n");
+ return PTR_ERR(tdma->rst);
+ }
+ reset_control_reset(tdma->rst);
+
+ tdma->dma_dev.dev = &pdev->dev;
+
+ iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
+ if (!iommu_spec) {
+ dev_err(&pdev->dev, "Missing iommu stream-id\n");
+ return -EINVAL;
+ }
+ stream_id = iommu_spec->ids[0] & 0xffff;