From caf38f4702a75c7ba13d5d80d902812c5faa8501 Mon Sep 17 00:00:00 2001
From: Joel Fernandes <joelf@ti.com>
Date: Thu, 27 Jun 2013 20:18:52 -0500
Subject: [PATCH 10/13] DMA: EDMA: Add support for Cyclic DMA

Using the PaRAM configuration function that we split out for reuse by
the different DMA types, we implement Cyclic DMA support.
For the cyclic case, we pass different configuration parameters to this
function and add all the cyclic-specific code separately.
Callbacks are handled transparently, as usual, by the virt-dma layer.
Linking is handled the same way as in the slave SG case.

Signed-off-by: Joel Fernandes <joelf@ti.com>
---
 drivers/dma/edma.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)
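
Notes (below the fold, not part of the commit message):

edma_prep_dma_cyclic() reads the device-side address, bus width and
burst size from echan->cfg, so a client is expected to have configured
the channel via dmaengine_slave_config() beforehand. A minimal sketch of
that prerequisite; "foo_config_chan" and "fifo_phys" are hypothetical
stand-ins for a client's own names:

	#include <linux/dmaengine.h>

	static int foo_config_chan(struct dma_chan *chan, dma_addr_t fifo_phys)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo_phys,	/* device FIFO (assumed) */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 4,
		};

		/* edma stores these in echan->cfg for later prep calls */
		return dmaengine_slave_config(chan, &cfg);
	}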
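
Once the channel is configured, a client starts a cyclic transfer
through the standard dmaengine entry point, which lands in the new
edma_prep_dma_cyclic() below. A sketch, with "foo_start_cyclic" and its
callback arguments hypothetical; per the note added in edma_callback(),
the callback must be set on the descriptor before submission. Note that
only the final period's PaRAM set enables TCINTEN here, so the callback
runs once per full buffer traversal rather than once per period:

	static int foo_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				    size_t buf_len, size_t period_len,
				    dma_async_tx_callback cb, void *cb_arg)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EIO;

		/* virt-dma invokes this from vchan_cookie_complete() */
		desc->callback = cb;
		desc->callback_param = cb_arg;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}
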
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 87b7e2b..cec9a12 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -321,6 +321,88 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
 	return absync;
 }
 
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	struct edma_desc *edesc;
+	dma_addr_t src_addr, dst_addr;
+	enum dma_slave_buswidth dev_width;
+	u32 burst;
+	int i, ret, nr_periods;
+
+	if (unlikely(!echan || !buf_len || !period_len))
+		return NULL;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		src_addr = echan->cfg.src_addr;
+		dst_addr = buf_addr;
+		dev_width = echan->cfg.src_addr_width;
+		burst = echan->cfg.src_maxburst;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		src_addr = buf_addr;
+		dst_addr = echan->cfg.dst_addr;
+		dev_width = echan->cfg.dst_addr_width;
+		burst = echan->cfg.dst_maxburst;
+	} else {
+		dev_err(dev, "%s: bad direction\n", __func__);
+		return NULL;
+	}
+
+	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+		dev_err(dev, "Undefined slave buswidth\n");
+		return NULL;
+	}
+
+	if (unlikely(buf_len % period_len)) {
+		dev_err(dev, "Buffer length must be a multiple of the period length\n");
+		return NULL;
+	}
+
+	nr_periods = buf_len / period_len;
+	edesc = kzalloc(sizeof(*edesc) + nr_periods *
+		sizeof(edesc->pset[0]), GFP_ATOMIC);
+	if (!edesc) {
+		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		return NULL;
+	}
+
+	edesc->pset_nr = nr_periods;
+
+	for (i = 0; i < nr_periods; i++) {
+		/* Allocate a PaRAM slot for this period, if needed */
+		if (echan->slot[i] < 0) {
+			echan->slot[i] = edma_alloc_slot(EDMA_CTLR(echan->ch_num),
+							 EDMA_SLOT_ANY);
+			if (echan->slot[i] < 0) {
+				dev_err(dev, "Failed to allocate slot\n");
+				kfree(edesc);
+				return NULL;
+			}
+		}
+
+		ret = edma_config_pset(chan, &edesc->pset[i], src_addr, dst_addr,
+				       burst, dev_width, period_len, direction);
+		if (ret < 0) {
+			kfree(edesc);
+			return NULL;
+		}
+		edesc->absync = ret;
+
+		if (direction == DMA_DEV_TO_MEM)
+			dst_addr += period_len;
+		else
+			src_addr += period_len;
+		if (i == nr_periods - 1)
+			edesc->pset[i].opt |= TCINTEN;
+	}
+	/* TODO: tx_flags (the last parameter, currently 0) needs to be investigated */
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, 0);
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -424,6 +506,8 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 		edesc = echan->edesc;
 		if (edesc) {
 			edma_execute(echan);
+			/* Note: desc->callback must be set up by EDMA users so
+			 * that virt-dma calls it from vchan_cookie_complete() */
 			vchan_cookie_complete(&edesc->vdesc);
 		}
 
@@ -605,6 +689,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 			  struct device *dev)
 {
 	dma->device_prep_slave_sg = edma_prep_slave_sg;
+	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
 	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
 	dma->device_free_chan_resources = edma_free_chan_resources;
 	dma->device_issue_pending = edma_issue_pending;
-- 
1.8.2.1