[PATCH] dmaengine: ti: edma: Support for interleaved mem to mem transfer

From: Peter Ujfalusi
Date: Fri Feb 07 2020 - 09:20:12 EST


Add basic support for interleaved memory to memory transfers.

A dma_interleaved_template with a single frame (frame_size == 1) is
mapped to one A-synchronized PaRAM set: ACNT takes the chunk size, BCNT
the number of frames (numf) and SRC/DST BIDX the chunk size plus the
inter chunk gap. Constant (non incrementing) addressing is not
supported on either side, and the 16 bit ACNT/BCNT fields limit the
chunk size and the number of frames to 64K. Completion is signalled by
interrupt when DMA_PREP_INTERRUPT is set, otherwise the descriptor is
completed by polling.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@xxxxxx>
---
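Not part of the commit message, just to illustrate the PaRAM mapping with
made up numbers: gathering 16 chunks of 64 bytes from a source laid out
with a 128 byte stride into a packed destination (numf = 16,
sgl[0].size = 64, src_icg = 64, dst_icg = 0) becomes a single
A-synchronized set with ACNT = 64, BCNT = 16, CCNT = 1, SRC BIDX = 128
and DST BIDX = 64. ITCCHEN is set so the intermediate transfer
completions chain the remaining A-synchronized transfers without
software intervention.
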
drivers/dma/ti/edma.c | 80 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 80 insertions(+)
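
Also not part of the patch: a minimal client side sketch of how the new
callback can be exercised through the generic dmaengine API. The helper
name, the chunk/stride parameters and the error codes are made up for
illustration; the channel is assumed to have been requested with the
DMA_INTERLEAVE capability (e.g. via dma_request_chan_by_mask()) and
src/dst are assumed to be DMA addresses already mapped by the caller.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * De-interleave @numf chunks of @chunk bytes, placed @stride bytes apart
 * in @src, into a packed buffer at @dst (illustrative helper only).
 */
static int edma_interleaved_copy_sketch(struct dma_chan *chan,
					dma_addr_t dst, dma_addr_t src,
					size_t chunk, size_t stride,
					size_t numf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* One chunk per frame, as required by edma_prep_dma_interleaved() */
	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = numf;			/* becomes BCNT */
	xt->frame_size = 1;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->sgl[0].size = chunk;		/* becomes ACNT */
	xt->sgl[0].src_icg = stride - chunk;	/* SRC BIDX ends up as @stride */
	/* dst_icg left at 0 -> packed destination, DST BIDX is @chunk */

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	kfree(xt);
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}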

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 03a7f647f7b2..c291e72260bd 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1275,6 +1275,82 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }

+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long tx_flags)
+{
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct edmacc_param *param;
+	struct edma_desc *edesc;
+	size_t src_icg, dst_icg;
+	int src_bidx, dst_bidx;
+
+	/* Slave mode is not supported */
+	if (is_slave_direction(xt->dir))
+		return NULL;
+
+	if (xt->frame_size != 1 || xt->numf == 0)
+		return NULL;
+
+	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+		return NULL;
+
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+	if (src_icg) {
+		src_bidx = src_icg + xt->sgl[0].size;
+	} else if (xt->src_inc) {
+		src_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: SRC constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	if (dst_icg) {
+		dst_bidx = dst_icg + xt->sgl[0].size;
+	} else if (xt->dst_inc) {
+		dst_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: DST constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+		return NULL;
+
+	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+	if (!edesc)
+		return NULL;
+
+	edesc->direction = DMA_MEM_TO_MEM;
+	edesc->echan = echan;
+	edesc->pset_nr = 1;
+
+	param = &edesc->pset[0].param;
+
+	param->src = xt->src_start;
+	param->dst = xt->dst_start;
+	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+	param->ccnt = 1;
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = 0;
+	param->link_bcntrld = 0xffffffff;
+
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt |= ITCCHEN;
+	/* Enable transfer complete interrupt if requested */
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		param->opt |= TCINTEN;
+	else
+		edesc->polled = true;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
@@ -1917,7 +1993,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
"Legacy memcpy is enabled, things might not work\n");

 		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
 		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
 	}

@@ -1953,8 +2031,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)

 		dma_cap_zero(m_ddev->cap_mask);
 		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);

 		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
 		m_ddev->device_free_chan_resources = edma_free_chan_resources;
 		m_ddev->device_issue_pending = edma_issue_pending;
--
Peter
