Re: [PATCH v3] dmaengine: add CSR SiRFprimaII DMAC driver

From: Barry Song
Date: Mon Oct 17 2011 - 10:18:57 EST


2011/10/17 Vinod Koul <vinod.koul@xxxxxxxxx>:
> On Mon, 2011-10-17 at 02:36 -0700, Barry Song wrote:
>> From: Rongjun Ying <Rongjun.Ying@xxxxxxx>
>>
>> Cc: Jassi Brar <jaswinder.singh@xxxxxxxxxx>
>> Cc: Arnd Bergmann <arnd@xxxxxxxx>
>> Cc: Linus Walleij <linus.walleij@xxxxxxxxxx>
>> Signed-off-by: Rongjun Ying <rongjun.ying@xxxxxxx>
>> Signed-off-by: Barry Song <Barry.Song@xxxxxxx>
>> ---
>> -v3:
>>  use new dma_transfer_direction API from Jassi and Vinod;
>>  use new dma_interleaved_template API from Jassi Brar;
>>  fix minor review comments from Arnd and Vinod in v2;
>>  add prima2 loop DMA mode support.
>>
>>  MAINTAINERS            |    1 +
>>  drivers/dma/Kconfig    |    7 +
>>  drivers/dma/Makefile   |    1 +
>>  drivers/dma/sirf-dma.c |  685 ++++++++++++++++++++++++++++++++++++++++++++++++
>>  4 files changed, 694 insertions(+), 0 deletions(-)
>>  create mode 100644 drivers/dma/sirf-dma.c
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index 5483e0c..adcb6ce 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -739,6 +739,7 @@ M:	Barry Song <baohua.song@xxxxxxx>
>>  L:	linux-arm-kernel@xxxxxxxxxxxxxxxxxxx (moderated for non-subscribers)
>>  S:	Maintained
>>  F:	arch/arm/mach-prima2/
>> +F:	drivers/dma/sirf-dma*
>>
>>  ARM/EBSA110 MACHINE SUPPORT
>>  M:	Russell King <linux@xxxxxxxxxxxxxxxx>
>> +++ b/drivers/dma/sirf-dma.c
>> @@ -0,0 +1,685 @@
>> +/*
>> + * DMA controller driver for CSR SiRFprimaII
>> + *
>> + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
>> + *
>> + * Licensed under GPLv2 or later.
> There is a mismatch between this and MODULE_LICENSE()
>> +
>> +
>> +struct sirfsoc_dma_chan {
>> +	struct dma_chan			chan;
>> +	struct list_head		free;
>> +	struct list_head		prepared;
>> +	struct list_head		queued;
>> +	struct list_head		active;
>> +	struct list_head		completed;
>> +	dma_cookie_t			completed_cookie;
>> +	unsigned long			happened_cyclic;
>> +	unsigned long			completed_cyclic;
>> +
>> +	/* Lock for this structure */
>> +	spinlock_t			lock;
>> +
>> +	int	mode;
> alignment pls...
>
>> +
>> +/* Execute all queued DMA descriptors */
>> +static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
>> +{
>> +	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
>> +	int cid = schan->chan.chan_id;
>> +	struct sirfsoc_dma_desc *sdesc = NULL;
>> +
>> +	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
>> +		node);
>> +	/* Move the first queued descriptor to active list */
>> +	list_move_tail(&sdesc->node, &schan->active);
> shouldn't the lock be held before this? And if this function is called
> with the lock held, it would help to mention that explicitly
>> +
>> +	/* Start the DMA transfer */
>> +	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
>> +	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
>> +		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
>> +		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
>> +	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
>> +	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
>> +	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | (1 << cid),
>> +		sdma->base + SIRFSOC_DMA_INT_EN);
>> +	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
>> +
>> +	if (sdesc->cyclic) {
>> +		writel((1 << cid) | 1 << (cid + 16) |
>> +			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
>> +			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
>> +		schan->happened_cyclic = schan->completed_cyclic = 0;
>> +	}
> any reason why we have mixed use of writel_relaxed and writel?
> Shouldn't all the DMA register writes be done only using writel?

Arnd suggested this in v2.
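His point, as I understand it: writel_relaxed() omits the barrier that
writel() carries, so the configuration registers can be programmed with
relaxed writes, and the single non-relaxed writel() that kicks off the
transfer orders all of them. A minimal sketch of the pattern (with
hypothetical register names, not the driver's actual defines):

	writel_relaxed(width, base + WIDTH_REG);	/* no barrier needed yet */
	writel_relaxed(xlen, base + XLEN_REG);		/* no barrier needed yet */
	writel(addr >> 2, base + ADDR_REG);	/* barrier, then DMA starts */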

>
>> +}
>> +
>> +
>> +static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
>> +	struct dma_chan *chan, struct dma_interleaved_template *xt)
>> +{
>> +	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
>> +	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +	struct sirfsoc_dma_desc *sdesc = NULL;
>> +	unsigned long iflags;
>> +	int ret;
>> +
>> +	if ((xt->dir != MEM_TO_DEV) && (xt->dir != DEV_TO_MEM)) {
>> +		ret = -EINVAL;
>> +		goto err_dir;
>> +	}
>> +
>> +	/* Get free descriptor */
>> +	spin_lock_irqsave(&schan->lock, iflags);
>> +	if (!list_empty(&schan->free)) {
>> +		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
>> +			node);
>> +		list_del(&sdesc->node);
>> +	}
>> +	spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +	if (!sdesc) {
>> +		/* try to free completed descriptors */
>> +		sirfsoc_dma_process_completed(sdma);
>> +		ret = 0;
>> +		goto no_desc;
>> +	}
>> +
>> +	/* Place descriptor in prepared list */
>> +	spin_lock_irqsave(&schan->lock, iflags);
>> +	if ((xt->frame_size == 1) && (xt->numf > 0)) {
> what does this mean?
>> +		sdesc->cyclic = 0;
>> +		sdesc->xlen = xt->sgl[0].size / 4;
>> +		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / 4;
> what's so magical about 4?

The xlen and dma_width are in units of 4 bytes. xlen = 1 means 4 bytes
are transferred per line.
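
For example (hypothetical numbers, following the code above): a 2D
transfer of 16 lines, each carrying 64 data bytes followed by a 16-byte
gap (icg), would be set up as:

	sdesc->xlen  = 64 / 4;		/* 16 words of data per line */
	sdesc->width = (64 + 16) / 4;	/* 20 words from line start to line start */
	sdesc->ylen  = 16 - 1;		/* 15 more lines after the first */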

>> +		sdesc->ylen = xt->numf - 1;
>> +		if (xt->dir == MEM_TO_DEV) {
>> +			sdesc->addr = xt->src_start;
>> +			sdesc->dir = 1;
>> +		} else {
>> +			sdesc->addr = xt->dst_start;
>> +			sdesc->dir = 0;
>> +		}
>> +
>> +		list_add_tail(&sdesc->node, &schan->prepared);
>> +	} else {
>> +		pr_err("sirfsoc DMA Invalid xfer\n");
>> +		ret = -EINVAL;
>> +		goto err_xfer;
>> +	}
>> +	spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +	return &sdesc->desc;
>> +err_xfer:
>> +	spin_unlock_irqrestore(&schan->lock, iflags);
>> +no_desc:
>> +err_dir:
>> +	return ERR_PTR(ret);
>> +}
>> +
>> +static struct dma_async_tx_descriptor *
>> +sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
>> +	size_t buf_len, size_t period_len,
>> +	enum dma_transfer_direction direction)
>> +{
>> +	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +	struct sirfsoc_dma_desc *sdesc = NULL;
>> +	unsigned long iflags;
>> +
>> +	/*
>> +	 * We only support cyclic transfers with two periods.
>> +	 * If the X-length is set to 0, the controller works in loop mode:
>> +	 * the DMA address keeps increasing until it reaches the end of a
>> +	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
>> +	 * then goes back to the beginning of that area.
>> +	 * In loop mode, the DMA data region is divided into two parts,
>> +	 * BUFA and BUFB, and the controller generates an interrupt twice
>> +	 * per loop: when the DMA address reaches the end of BUFA and when
>> +	 * it reaches the end of BUFB.
>> +	 */
>> +	if (buf_len != 2 * period_len)
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	/* Get free descriptor */
>> +	spin_lock_irqsave(&schan->lock, iflags);
>> +	if (!list_empty(&schan->free)) {
>> +		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
>> +			node);
>> +		list_del(&sdesc->node);
>> +	}
>> +	spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +	if (!sdesc)
>> +		return NULL;
>> +
>> +	/* Place descriptor in prepared list */
>> +	spin_lock_irqsave(&schan->lock, iflags);
>> +	sdesc->addr = addr;
>> +	sdesc->cyclic = 1;
>> +	sdesc->xlen = 0;
>> +	sdesc->ylen = buf_len / 4 - 1;
>> +	sdesc->width = 1;
>> +	list_add_tail(&sdesc->node, &schan->prepared);
>> +	spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +	return &sdesc->desc;
>> +}
> is this interleaved cyclic dma or plain sg?

This is not interleaved DMA; it is a plain cyclic transfer.
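
A minimal usage sketch (hypothetical client code, with invented names)
of how a peripheral driver would set up such a two-period cyclic
transfer:

	struct dma_async_tx_descriptor *desc;

	/* buf_len must be exactly twice period_len on this controller */
	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
			2 * period_len, period_len, DEV_TO_MEM);
	if (!desc)
		return -ENOMEM;
	desc->callback = period_elapsed;	/* runs at the end of each half */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);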

>> +
>> +/*
>> + * The DMA controller consists of 16 independent DMA channels.
>> + * Each channel is allocated to a different function
>> + */
>> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
>> +{
>> +	unsigned int ch_nr = (unsigned int) chan_id;
>> +
>> +	if (ch_nr == chan->chan_id +
>> +		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
>> +		return true;
> pls fix indent here
>> +
>> +	return false;
>> +}
>> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
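
A client would then typically grab a channel with something like this
(hypothetical usage, not part of the patch):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* channel 12 of controller 0, given 16 channels per controller */
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
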
>> +
>> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
>> +{
>> +	struct device_node *dn = op->dev.of_node;
>> +	struct device *dev = &op->dev;
>> +	struct dma_device *dma;
>> +	struct sirfsoc_dma *sdma;
>> +	struct sirfsoc_dma_chan *schan;
>> +	struct resource res;
>> +	ulong regs_start, regs_size;
>> +	u32 id;
>> +	int retval, i;
>> +
>> +	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
>> +	if (!sdma) {
>> +		dev_err(dev, "Memory exhausted!\n");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	if (of_property_read_u32(dn, "cell-index", &id)) {
>> +		dev_err(dev, "Fail to get DMAC index\n");
> and you leak memory!!
>> +		return -ENODEV;
>> +	}
>> +
>> +	sdma->irq = irq_of_parse_and_map(dn, 0);
>> +	if (sdma->irq == NO_IRQ) {
>> +		dev_err(dev, "Error mapping IRQ!\n");
> and again...
>> +		return -EINVAL;
>> +	}
>> +
>> +	retval = of_address_to_resource(dn, 0, &res);
>> +	if (retval) {
>> +		dev_err(dev, "Error parsing memory region!\n");
> and again...
>> +		return retval;
>> +	}
>> +
>> +	regs_start = res.start;
>> +	regs_size = resource_size(&res);
>> +
>> +	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
>> +		dev_err(dev, "Error requesting memory region!\n");
>> +		return -EBUSY;
>> +	}
>> +
>> +	sdma->base = devm_ioremap(dev, regs_start, regs_size);
>> +	if (!sdma->base) {
>> +		dev_err(dev, "Error mapping memory region!\n");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
>> +		sdma);
>> +	if (retval) {
>> +		dev_err(dev, "Error requesting IRQ!\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	dma = &sdma->dma;
>> +	dma->dev = dev;
>> +	dma->chancnt = SIRFSOC_DMA_CHANNELS;
>> +
>> +	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
>> +	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
>> +	dma->device_issue_pending = sirfsoc_dma_issue_pending;
>> +	dma->device_control = sirfsoc_dma_control;
>> +	dma->device_tx_status = sirfsoc_dma_tx_status;
>> +	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
>> +	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
>> +
>> +	INIT_LIST_HEAD(&dma->channels);
>> +	dma_cap_set(DMA_SLAVE, dma->cap_mask);
>> +	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
>> +	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
>> +	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
>> +
>> +	for (i = 0; i < dma->chancnt; i++) {
>> +		schan = &sdma->channels[i];
>> +
>> +		schan->chan.device = dma;
>> +		schan->chan.cookie = 1;
>> +		schan->completed_cookie = schan->chan.cookie;
>> +
>> +		INIT_LIST_HEAD(&schan->free);
>> +		INIT_LIST_HEAD(&schan->prepared);
>> +		INIT_LIST_HEAD(&schan->queued);
>> +		INIT_LIST_HEAD(&schan->active);
>> +		INIT_LIST_HEAD(&schan->completed);
>> +
>> +		spin_lock_init(&schan->lock);
>> +		list_add_tail(&schan->chan.device_node, &dma->channels);
>> +	}
>> +
>> +	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
>> +
>> +	/* Register DMA engine */
>> +	dev_set_drvdata(dev, sdma);
>> +	retval = dma_async_device_register(dma);
>> +	if (retval) {
>> +		devm_free_irq(dev, sdma->irq, sdma);
>> +		irq_dispose_mapping(sdma->irq);
>> +	}
>> +
>> +	return retval;
>> +}
> the error path handling in this function is totally non-existent!!

Sorry, I must have made these mistakes by copying from the probe of
drivers/dma/mpc512x_dma.c.
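
The unwind should look roughly like this (a sketch: the devm_*-managed
allocations are released automatically when probe fails, so only the
IRQ mapping from irq_of_parse_and_map() needs explicit disposal):

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}
	...
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return retval;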

>> +
>> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
>> +{
>> +	struct device *dev = &op->dev;
>> +	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
>> +
>> +	dma_async_device_unregister(&sdma->dma);
>> +	devm_free_irq(dev, sdma->irq, sdma);
>> +	irq_dispose_mapping(sdma->irq);
> who will free mem allocated in probe?

That is also due to copying from drivers/dma/mpc512x_dma.c. Sorry.

>> +
>> +	return 0;
>> +}
>> +
> --
> ~Vinod

-barry