From 84adccfb8cd2a6b8237da6752668ba25cd90c20b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 24 Mar 2011 11:32:15 +0530 Subject: dmaengine/dw_dmac fix: dwc_scan_descriptors must compare first desc address also with llp

dwc_scan_descriptors scans all descriptors from active_list in case the transfer is not completed. It compares first_desc->lli.llp, and then all children in its tx_list. But it doesn't compare first_desc's own address, i.e. first_desc->txd.phys, even though this is what was initially programmed into the controller register. This causes the DMA to stop and finish a transfer that was never started, and thus to fail.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 9c25c7d..b15c32c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -304,6 +304,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { + /* check first descriptor's addr */ + if (desc->txd.phys == llp) + return; + + /* check first descriptor's llp */ if (desc->lli.llp == llp) /* This one is currently in progress */ return; -- cgit v1.1

From e2ec771a99a5cf231c9dea4da26238bf073e1e9c Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 27 Mar 2011 01:26:52 +0800 Subject: dma: use BUG_ON correctly in iop-adma.c, v4

This patch makes BUG_ON() usage correct in drivers/dma/iop-adma.c. BUG_ON() already marks its condition as unlikely(), so wrapping the condition in unlikely() is redundant.

Cc: Dan Williams Cc: Vinod Koul Signed-off-by: Coly Li Signed-off-by: Vinod Koul --- drivers/dma/iop-adma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c6b01f5..e03f811 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); + BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", __func__, len); @@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); + BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", __func__, len); @@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u flags: %lx\n", -- cgit v1.1

From 7912d30007d0c958bcf11cd5ce19f77856cf041b Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 27 Mar 2011 01:26:53 +0800 Subject: dma: use BUG_ON correctly in mv_xor.c, v4

This patch makes BUG_ON() usage correct in drivers/dma/mv_xor.c.

Cc: Vinod Koul Cc: Dan Williams Signed-off-by: Coly Li Signed-off-by: Vinod Koul --- drivers/dma/mv_xor.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index a25f5f6..954e334 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; - BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memcpy_slot_count(len); @@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; - BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memset_slot_count(len); @@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; - BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan->device->common.dev, "%s src_cnt: %d len: dest %x %u flags: %ld\n", -- cgit v1.1

From 427cdf19b97e509e21e5d347e18d8b0b34723dfc Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 27 Mar 2011 01:26:54 +0800 Subject: dma: use BUG_ON correctly in ppc4xx/adma.c, v4

This patch makes BUG_ON() usage correct in drivers/dma/ppc4xx/adma.c.

Cc: Vinod Koul Cc: Dan Williams Cc: Grant Likely Cc: Anatolij Gustschin Cc: Sean MacLennan Cc: Joe Perches Signed-off-by: Coly Li Signed-off-by: Vinod Koul --- drivers/dma/ppc4xx/adma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index cef5845..a2b62e9 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); + BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); spin_lock_bh(&ppc440spe_chan->lock); @@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); + BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); spin_lock_bh(&ppc440spe_chan->lock); @@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( dma_dest, dma_src, src_cnt)); if (unlikely(!len)) return NULL; - BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(ppc440spe_chan->device->common.dev, "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", @@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq( ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, dst, src, src_cnt)); BUG_ON(!len); - BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); + BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); BUG_ON(!src_cnt); if (src_cnt == 1 && dst[1] == src[0]) { -- cgit v1.1

From e2142df7ec7184ed4a77ada686bc1eb41075490f Mon Sep 17 00:00:00 2001 From: Kristen Carlson Accardi Date: Thu, 31 Mar 2011 11:02:43 -0700 Subject: intel_mid_dma: fix runtime pm issues

Use the correct API in probe to enable runtime PM for this driver. Additionally, do not just call the legacy suspend routine for runtime_suspend, as this duplicates work the PCI core already does for you.
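For context, the balanced runtime-PM pattern this commit moves to looks roughly like the sketch below. It is illustrative only, not part of the patch, and it assumes (as the message says) that the PCI core has already initialized runtime PM for the device before probe() runs:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... normal device setup ... */
	pm_runtime_put_noidle(&pdev->dev);	/* drop the core's usage count */
	pm_runtime_allow(&pdev->dev);		/* permit runtime suspend */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pm_runtime_get_noresume(&pdev->dev);	/* rebalance the usage count */
	pm_runtime_forbid(&pdev->dev);
	/* ... teardown ... */
}

The put/get and allow/forbid pairs must stay balanced across probe and remove, which is exactly what the hunks below implement.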
Signed-off-by: Kristen Carlson Accardi Signed-off-by: Vinod Koul --- drivers/dma/intel_mid_dma.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 798f46a..f153adf 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, if (err) goto err_dma; - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; @@ -1322,6 +1321,9 @@ err_enable_device: static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) { struct middma_device *device = pci_get_drvdata(pdev); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_forbid(&pdev->dev); middma_shutdown(pdev); pci_dev_put(pdev); kfree(device); @@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci) static int dma_runtime_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - return dma_suspend(pci_dev, PMSG_SUSPEND); + struct middma_device *device = pci_get_drvdata(pci_dev); + + device->state = SUSPENDED; + return 0; } static int dma_runtime_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); - return dma_resume(pci_dev); + struct middma_device *device = pci_get_drvdata(pci_dev); + + device->state = RUNNING; + iowrite32(REG_BIT0, device->dma_base + DMA_CFG); + return 0; } static int dma_runtime_idle(struct device *dev) -- cgit v1.1

From 364de77831213be20f7f33c39ca1c194593b5c11 Mon Sep 17 00:00:00 2001 From: Liu Yuan Date: Sat, 2 Apr 2011 14:20:47 +0800 Subject: drivers, pch_dma: Fix uninitialized var before use

In the function pdc_desc_get(), the variable 'i' is not initialized before use. This patch fixes it.

Signed-off-by: Liu Yuan Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 8d8fef1..6eebc62 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -403,7 +403,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) { struct pch_dma_desc *desc, *_d; struct pch_dma_desc *ret = NULL; - int i; + int i = 0; spin_lock(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { -- cgit v1.1

From 9b3aa589eaa1366200062ce1f9cc7ddca8d1d578 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Sat, 30 Apr 2011 16:57:45 +0200 Subject: dmaengine: at_hdmac: modify way to use interrupts

Now we use Buffer Transfer Completed interrupts. If we want to be notified when a chained buffer has completed, we set up the ATC_IEN bit in the CTRLB register of the lli. This is done by the set_desc_eol() function and used by the memcpy/slave_sg functions.
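A note on the ATC_IEN bit, since the wording above is easy to misread: judging from the hunks below, the bit acts as an active-low per-buffer interrupt mask — the prep functions set it on intermediate descriptors, and set_desc_eol() clears it on the terminal one, so only the final buffer of a chain raises a BTC interrupt. A minimal sketch of that convention (example_mark_chain() is a hypothetical helper, not part of the patch):

/* Sketch, assuming ATC_IEN is an active-low "suppress BTC" mask bit. */
static void example_mark_chain(struct at_desc **descs, unsigned int n)
{
	unsigned int i;

	/* intermediate buffers: keep the per-buffer BTC interrupt masked */
	for (i = 0; i < n; i++)
		descs[i]->lli.ctrlb |= ATC_IEN;

	/* terminal buffer: unmask BTC and stop lli fetching */
	set_desc_eol(descs[n - 1]);
}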
Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 4 ++-- drivers/dma/at_hdmac_regs.h | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 3d7d705..13050e6 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -464,7 +464,7 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) for (i = 0; i < atdma->dma_common.chancnt; i++) { atchan = &atdma->chan[i]; - if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) { + if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { if (pending & AT_DMA_ERR(i)) { /* Disable channel on AHB error */ dma_writel(atdma, CHDR, atchan->mask); @@ -549,7 +549,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, } ctrla = ATC_DEFAULT_CTRLA; - ctrlb = ATC_DEFAULT_CTRLB + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | ATC_SRC_ADDR_MODE_INCR | ATC_DST_ADDR_MODE_INCR | ATC_FC_MEM2MEM; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 495457e..8303306 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -309,8 +309,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on) struct at_dma *atdma = to_at_dma(atchan->chan_common.device); u32 ebci; - /* enable interrupts on buffer chain completion & error */ - ebci = AT_DMA_CBTC(atchan->chan_common.chan_id) + /* enable interrupts on buffer transfer completion & error */ + ebci = AT_DMA_BTC(atchan->chan_common.chan_id) | AT_DMA_ERR(atchan->chan_common.chan_id); if (on) dma_writel(atdma, EBCIER, ebci); @@ -347,7 +347,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) */ static void set_desc_eol(struct at_desc *desc) { - desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; + u32 ctrlb = desc->lli.ctrlb; + + ctrlb &= ~ATC_IEN; + ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; + + desc->lli.ctrlb = ctrlb; desc->lli.dscr = 0; } -- cgit v1.1

From 53830cc75974a199b6b654c062ff8c54c58caa0b Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Sat, 30 Apr 2011 16:57:46 +0200 Subject: dmaengine: at_hdmac: add cyclic DMA operation support

Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 231 +++++++++++++++++++++++++++++++++++++++++--- drivers/dma/at_hdmac_regs.h | 14 ++- 2 files changed, 229 insertions(+), 16 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 13050e6..ed9d924 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) } /** + * atc_desc_chain - build chain adding a descriptor + * @first: address of first descriptor of the chain + * @prev: address of previous descriptor of the chain + * @desc: descriptor to queue + * + * Called from prep_* functions + */ +static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, + struct at_desc *desc) +{ + if (!(*first)) { + *first = desc; + } else { + /* inform the HW lli about chaining */ + (*prev)->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &(*first)->tx_list); + } + *prev = desc; +} + +/** * atc_assign_cookie - compute and assign new cookie * @atchan: channel we work on * @desc: descriptor to assign cookie for @@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) static void atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) { - dma_async_tx_callback callback; - void *param;
struct dma_async_tx_descriptor *txd = &desc->txd; dev_vdbg(chan2dev(&atchan->chan_common), "descriptor %u complete\n", txd->cookie); atchan->completed_cookie = txd->cookie; - callback = txd->callback; - param = txd->callback_param; /* move children to free_list */ list_splice_init(&desc->tx_list, &atchan->free_list); @@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) } } - /* - * The API requires that no submissions are done from a - * callback, so we don't need to drop the lock here - */ - if (callback) - callback(param); + /* for cyclic transfers, + * no need to replay callback function while stopping */ + if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { + dma_async_tx_callback callback = txd->callback; + void *param = txd->callback_param; + + /* + * The API requires that no submissions are done from a + * callback, so we don't need to drop the lock here + */ + if (callback) + callback(param); + } dma_run_dependencies(txd); } @@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan) atc_chain_complete(atchan, bad_desc); } +/** + * atc_handle_cyclic - at the end of a period, run callback function + * @atchan: channel used for cyclic operations + * + * Called with atchan->lock held and bh disabled + */ +static void atc_handle_cyclic(struct at_dma_chan *atchan) +{ + struct at_desc *first = atc_first_active(atchan); + struct dma_async_tx_descriptor *txd = &first->txd; + dma_async_tx_callback callback = txd->callback; + void *param = txd->callback_param; + + dev_vdbg(chan2dev(&atchan->chan_common), + "new cyclic period llp 0x%08x\n", + channel_readl(atchan, DSCR)); + + if (callback) + callback(param); +} /*-- IRQ & Tasklet ---------------------------------------------------*/ @@ -434,8 +480,10 @@ static void atc_tasklet(unsigned long data) { } spin_lock(&atchan->lock); - if (test_and_clear_bit(0, &atchan->error_status)) + if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) atc_handle_error(atchan); + else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + atc_handle_cyclic(atchan); else atc_advance_work(atchan); @@ -469,7 +517,7 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) /* Disable channel on AHB error */ dma_writel(atdma, CHDR, atchan->mask); /* Give information to tasklet */ - set_bit(0, &atchan->error_status); + set_bit(ATC_IS_ERROR, &atchan->status); } tasklet_schedule(&atchan->tasklet); ret = IRQ_HANDLED; @@ -759,6 +807,148 @@ err_desc_get: return NULL; } +/** + * atc_dma_cyclic_check_values + * Check for too big/unaligned periods and unaligned DMA buffer + */ +static int +atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, + size_t period_len, enum dma_data_direction direction) +{ + if (period_len > (ATC_BTSIZE_MAX << reg_width)) + goto err_out; + if (unlikely(period_len & ((1 << reg_width) - 1))) + goto err_out; + if (unlikely(buf_addr & ((1 << reg_width) - 1))) + goto err_out; + if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) + goto err_out; + + return 0; + +err_out: + return -EINVAL; +} + +/** + * atc_dma_cyclic_fill_desc - Fill one period descriptor + */ +static int +atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, + unsigned int period_index, dma_addr_t buf_addr, + size_t period_len, enum dma_data_direction direction) +{ + u32 ctrla; + unsigned int reg_width = atslave->reg_width; + + /* prepare common CTRLA value */ + ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla + | ATC_DST_WIDTH(reg_width) + | ATC_SRC_WIDTH(reg_width) + | period_len >> reg_width;
+ + switch (direction) { + case DMA_TO_DEVICE: + desc->lli.saddr = buf_addr + (period_len * period_index); + desc->lli.daddr = atslave->tx_reg; + desc->lli.ctrla = ctrla; + desc->lli.ctrlb = ATC_DEFAULT_CTRLB + | ATC_DST_ADDR_MODE_FIXED + | ATC_SRC_ADDR_MODE_INCR + | ATC_FC_MEM2PER; + break; + + case DMA_FROM_DEVICE: + desc->lli.saddr = atslave->rx_reg; + desc->lli.daddr = buf_addr + (period_len * period_index); + desc->lli.ctrla = ctrla; + desc->lli.ctrlb = ATC_DEFAULT_CTRLB + | ATC_DST_ADDR_MODE_INCR + | ATC_SRC_ADDR_MODE_FIXED + | ATC_FC_PER2MEM; + break; + + default: + return -EINVAL; + } + + return 0; +} + +/** + * atc_prep_dma_cyclic - prepare the cyclic DMA transfer + * @chan: the DMA channel to prepare + * @buf_addr: physical DMA address where the buffer starts + * @buf_len: total number of bytes for the entire buffer + * @period_len: number of bytes for each period + * @direction: transfer direction, to or from device + */ +static struct dma_async_tx_descriptor * +atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_data_direction direction) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_slave *atslave = chan->private; + struct at_desc *first = NULL; + struct at_desc *prev = NULL; + unsigned long was_cyclic; + unsigned int periods = buf_len / period_len; + unsigned int i; + + dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", + direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", + buf_addr, + periods, buf_len, period_len); + + if (unlikely(!atslave || !buf_len || !period_len)) { + dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); + return NULL; + } + + was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); + if (was_cyclic) { + dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); + return NULL; + } + + /* Check for too big/unaligned periods and unaligned DMA buffer */ + if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, + period_len, direction)) + goto err_out; + + /* build cyclic linked list */ + for (i = 0; i < periods; i++) { + struct at_desc *desc; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, + period_len, direction)) + goto err_desc_get; + + atc_desc_chain(&first, &prev, desc); + } + + /* let's make a cyclic list */ + prev->lli.dscr = first->txd.phys; + + /* First descriptor of the chain embeds additional information */ + first->txd.cookie = -EBUSY; + first->len = buf_len; + + return &first->txd; + +err_desc_get: + dev_err(chan2dev(chan), "not enough descriptors available\n"); + atc_desc_put(atchan, first); +err_out: + clear_bit(ATC_IS_CYCLIC, &atchan->status); + return NULL; +} + + static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { @@ -793,6 +983,9 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_chain_complete(atchan, desc); + /* if channel dedicated to cyclic operations, free it */ + clear_bit(ATC_IS_CYCLIC, &atchan->status); + spin_unlock_bh(&atchan->lock); return 0; @@ -853,6 +1046,10 @@ static void atc_issue_pending(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "issue_pending\n"); + /* Not needed for cyclic transfers */ + if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + return; + spin_lock_bh(&atchan->lock); if (!atc_chan_is_enabled(atchan)) { atc_advance_work(atchan); @@ -959,6 +1156,7 @@ static void
atc_free_chan_resources(struct dma_chan *chan) } list_splice_init(&atchan->free_list, &list); atchan->descs_allocated = 0; + atchan->status = 0; dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } @@ -1092,10 +1290,15 @@ static int __init at_dma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; + + if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) + atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; + + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || + dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) atdma->dma_common.device_control = atc_control; - } dma_writel(atdma, EN, AT_DMA_ENABLE); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 8303306..c79a9e0 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -181,12 +181,22 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) /*-- Channels --------------------------------------------------------*/ /** + * atc_status - information bits stored in channel status flag + * + * Manipulated with atomic operations. + */ +enum atc_status { + ATC_IS_ERROR = 0, + ATC_IS_CYCLIC = 24, +}; + +/** * struct at_dma_chan - internal representation of an Atmel HDMAC channel * @chan_common: common dmaengine channel object members * @device: parent device * @ch_regs: memory mapped register base * @mask: channel index in a mask - * @error_status: transmit error status information from irq handler + * @status: transmit status information from irq/prep* functions * to tasklet (use atomic operations) * @tasklet: bottom half to finish transaction work * @lock: serializes enqueue/dequeue operations to descriptors lists @@ -201,7 +211,7 @@ struct at_dma_chan { struct at_dma *device; void __iomem *ch_regs; u8 mask; - unsigned long error_status; + unsigned long status; struct tasklet_struct tasklet; spinlock_t lock; -- cgit v1.1 From cc52a10a048fc1fbe4ffba58c2f0afc79ae0f56f Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Sat, 30 Apr 2011 16:57:47 +0200 Subject: dmaengine: at_hdmac: debug information sg_len for prep_slave_sg Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index ed9d924..8f50a0f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -687,7 +687,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct scatterlist *sg; size_t total_len = 0; - dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", + dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", + sg_len, direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", flags); -- cgit v1.1 From 2f432823ec6e693d7b934e805ce1838f41d66ce7 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Sat, 30 Apr 2011 16:57:48 +0200 Subject: dmaengine: at_hdmac: remove channel status testing in tasklet There is no need to test if channel is enabled in tasklet: - in error path, channel is disabled in interrupt routine - in normal path, this test is performed in sub functions to report a misuse of the engine. 
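With the check gone, the tasklet reduces to a plain three-way dispatch, roughly as follows (a sketch reconstructed from the hunks earlier in this series, not verbatim kernel code):

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);	/* channel already disabled in irq handler */
	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		atc_handle_cyclic(atchan);	/* run the period callback */
	else
		atc_advance_work(atchan);	/* normal completion path */
	spin_unlock(&atchan->lock);
}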
Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 8f50a0f..65bd52a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -472,13 +472,6 @@ static void atc_tasklet(unsigned long data) { struct at_dma_chan *atchan = (struct at_dma_chan *)data; - /* Channel cannot be enabled here */ - if (atc_chan_is_enabled(atchan)) { - dev_err(chan2dev(&atchan->chan_common), - "BUG: channel enabled in tasklet\n"); - return; - } - spin_lock(&atchan->lock); if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) atc_handle_error(atchan); -- cgit v1.1 From ae14d4b5e0a4ebc4e674831cbb97b73ba66dba08 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Sat, 30 Apr 2011 16:57:49 +0200 Subject: dmaengine: at_hdmac: specialize AHB interfaces to optimize transfers DMA controller has two AHB interfaces on the SOC internal matrix. It is more efficient to specialize each interface as the access to memory can introduce latencies that are not compatible with peripheral accesses requirements. Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 26 +++++++++++++++----------- drivers/dma/at_hdmac_regs.h | 4 ++++ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 65bd52a..f52c9e3 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -37,8 +37,8 @@ #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) #define ATC_DEFAULT_CTRLA (0) -#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \ - |ATC_DIF(1)) +#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ + |ATC_DIF(AT_DMA_MEM_IF)) /* * Initial number of descriptors to allocate for each channel. This could @@ -693,14 +693,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, reg_width = atslave->reg_width; ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; + ctrlb = ATC_IEN; switch (direction) { case DMA_TO_DEVICE: ctrla |= ATC_DST_WIDTH(reg_width); ctrlb |= ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER; + | ATC_FC_MEM2PER + | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); reg = atslave->tx_reg; for_each_sg(sgl, sg, sg_len, i) { struct at_desc *desc; @@ -741,7 +742,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrla |= ATC_SRC_WIDTH(reg_width); ctrlb |= ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM; + | ATC_FC_PER2MEM + | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); reg = atslave->rx_reg; for_each_sg(sgl, sg, sg_len, i) { @@ -846,20 +848,22 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, desc->lli.saddr = buf_addr + (period_len * period_index); desc->lli.daddr = atslave->tx_reg; desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DEFAULT_CTRLB - | ATC_DST_ADDR_MODE_FIXED + desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR - | ATC_FC_MEM2PER; + | ATC_FC_MEM2PER + | ATC_SIF(AT_DMA_MEM_IF) + | ATC_DIF(AT_DMA_PER_IF); break; case DMA_FROM_DEVICE: desc->lli.saddr = atslave->rx_reg; desc->lli.daddr = buf_addr + (period_len * period_index); desc->lli.ctrla = ctrla; - desc->lli.ctrlb = ATC_DEFAULT_CTRLB - | ATC_DST_ADDR_MODE_INCR + desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED - | ATC_FC_PER2MEM; + | ATC_FC_PER2MEM + | ATC_SIF(AT_DMA_PER_IF) + | ATC_DIF(AT_DMA_MEM_IF); break; default: diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 
c79a9e0..ae3056d 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -103,6 +103,10 @@ /* Bitfields in CTRLB */ #define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ #define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ + /* Specify AHB interfaces */ +#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */ +#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */ + #define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ #define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ #define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ -- cgit v1.1 From 711b9cea92554be6bd44f04f2485582d762fc441 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Sat, 7 May 2011 17:09:43 +0200 Subject: dmaengine/ste_dma40: fix introduced warnings The compiler nowadays moans about possibly non-assigned variable. Fix this by default-assigning 0. Signed-off-by: Philippe Langlais Signed-off-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index af955de..65d2535 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) { struct stedma40_platform_data *plat = chan->base->plat_data; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; - dma_addr_t addr; + dma_addr_t addr = 0; if (chan->runtime_addr) return chan->runtime_addr; -- cgit v1.1 From 543aabc7d295bfe2489f184259395e3467520d48 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 6 May 2011 19:56:51 +0200 Subject: dmaengine: at_hdmac: set residue as total len in atc_tx_status If transfer status is !=DMA_SUCCESS, return total transfer len as residue, instead of zero. Idea from dw_dmac patch by Viresh Kumar. Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index f52c9e3..ba0b5ec 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1026,7 +1026,12 @@ atc_tx_status(struct dma_chan *chan, spin_unlock_bh(&atchan->lock); - dma_set_tx_state(txstate, last_complete, last_used, 0); + if (ret != DMA_SUCCESS) + dma_set_tx_state(txstate, last_complete, last_used, + atc_first_active(atchan)->len); + else + dma_set_tx_state(txstate, last_complete, last_used, 0); + dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", cookie, last_complete ? last_complete : 0, last_used ? last_used : 0); -- cgit v1.1 From 23b5e3ad68a3c26a6a36039ea907997664aedcab Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 6 May 2011 19:56:52 +0200 Subject: dmaengine: at_hdmac: implement pause and resume in atc_control Pause and resume controls are useful for audio devices. This also returns correct status from atc_tx_status() in case chan is paused. Idea from dw_dmac patch by Linus Walleij. 
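From a client's point of view, the pause state becomes visible through the ordinary status query; a hedged sketch of a consumer using the dmaengine client API of this era (illustrative only, not part of the patch):

static void example_query(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	/* pausing goes through the same control hook this patch extends */
	chan->device->device_control(chan, DMA_PAUSE, 0);

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("paused with %u bytes left\n", state.residue);
}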
Signed-off-by: Nicolas Ferre Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 97 ++++++++++++++++++++++++++++++++------------- drivers/dma/at_hdmac_regs.h | 1 + 2 files changed, 71 insertions(+), 27 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index ba0b5ec..5968245 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -508,7 +508,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { if (pending & AT_DMA_ERR(i)) { /* Disable channel on AHB error */ - dma_writel(atdma, CHDR, atchan->mask); + dma_writel(atdma, CHDR, + AT_DMA_RES(i) | atchan->mask); /* Give information to tasklet */ set_bit(ATC_IS_ERROR, &atchan->status); } @@ -952,39 +953,78 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc, *_desc; + int chan_id = atchan->chan_common.chan_id; + LIST_HEAD(list); - /* Only supports DMA_TERMINATE_ALL */ - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; + dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); - /* - * This is only called when something went wrong elsewhere, so - * we don't really care about the data. Just disable the - * channel. We still have to poll the channel enable bit due - * to AHB/HSB limitations. - */ - spin_lock_bh(&atchan->lock); + if (cmd == DMA_PAUSE) { + int pause_timeout = 1000; - dma_writel(atdma, CHDR, atchan->mask); + spin_lock_bh(&atchan->lock); - /* confirm that this channel is disabled */ - while (dma_readl(atdma, CHSR) & atchan->mask) - cpu_relax(); + dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); + + /* wait for FIFO to be empty */ + while (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) { + if (pause_timeout-- > 0) { + /* the FIFO can only drain if the peripheral + * is still requesting data: + * -> timeout if it is not the case. */ + dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); + spin_unlock_bh(&atchan->lock); + return -ETIMEDOUT; + } + cpu_relax(); + } - /* active_list entries will end up before queued entries */ - list_splice_init(&atchan->queue, &list); - list_splice_init(&atchan->active_list, &list); + set_bit(ATC_IS_PAUSED, &atchan->status); - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + spin_unlock_bh(&atchan->lock); + } else if (cmd == DMA_RESUME) { + if (!test_bit(ATC_IS_PAUSED, &atchan->status)) + return 0; - /* if channel dedicated to cyclic operations, free it */ - clear_bit(ATC_IS_CYCLIC, &atchan->status); + spin_lock_bh(&atchan->lock); - spin_unlock_bh(&atchan->lock); + dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); + clear_bit(ATC_IS_PAUSED, &atchan->status); + + spin_unlock_bh(&atchan->lock); + } else if (cmd == DMA_TERMINATE_ALL) { + struct at_desc *desc, *_desc; + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. 
+ */ + spin_lock_bh(&atchan->lock); + + /* disabling channel: must also remove suspend state */ + dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); + + /* confirm that this channel is disabled */ + while (dma_readl(atdma, CHSR) & atchan->mask) + cpu_relax(); + + /* active_list entries will end up before queued entries */ + list_splice_init(&atchan->queue, &list); + list_splice_init(&atchan->active_list, &list); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); + + clear_bit(ATC_IS_PAUSED, &atchan->status); + /* if channel dedicated to cyclic operations, free it */ + clear_bit(ATC_IS_CYCLIC, &atchan->status); + + spin_unlock_bh(&atchan->lock); + } else { + return -ENXIO; + } return 0; } @@ -1032,8 +1072,11 @@ atc_tx_status(struct dma_chan *chan, else dma_set_tx_state(txstate, last_complete, last_used, 0); - dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", - cookie, last_complete ? last_complete : 0, + if (test_bit(ATC_IS_PAUSED, &atchan->status)) + ret = DMA_PAUSED; + + dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", + ret, cookie, last_complete ? last_complete : 0, last_used ? last_used : 0); return ret; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index ae3056d..087dbf1 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -191,6 +191,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) */ enum atc_status { ATC_IS_ERROR = 0, + ATC_IS_PAUSED = 1, ATC_IS_CYCLIC = 24, }; -- cgit v1.1 From e257e1563f28890f54b5f82861373bb4b32dd770 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 6 May 2011 19:56:53 +0200 Subject: dmaengine: at_hdmac: use descriptor chaining help function A little function helps to chain descriptors: it is already used in cyclic dma operations, now use it in memcpy and slave_sg preparation functions. 
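The resulting prep-function shape is the familiar build-a-ring loop; a sketch of the pattern this helper enables (error handling and lli field setup elided):

	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct at_desc *desc = atc_desc_get(atchan);

		if (!desc)
			goto err_desc_get;
		/* ... fill desc->lli for this segment ... */
		atc_desc_chain(&first, &prev, desc);	/* links lli.dscr and tx_list */
	}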
Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 33 +++------------------------------ 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 5968245..2c6bc3a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -626,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, desc->txd.cookie = 0; - if (!first) { - first = desc; - } else { - /* inform the HW lli about chaining */ - prev->lli.dscr = desc->txd.phys; - /* insert the link descriptor to the LD ring */ - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; + atc_desc_chain(&first, &prev, desc); } /* First descriptor of the chain embedds additional information */ @@ -726,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | len >> mem_width; desc->lli.ctrlb = ctrlb; - if (!first) { - first = desc; - } else { - /* inform the HW lli about chaining */ - prev->lli.dscr = desc->txd.phys; - /* insert the link descriptor to the LD ring */ - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; + atc_desc_chain(&first, &prev, desc); total_len += len; } break; @@ -769,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | len >> reg_width; desc->lli.ctrlb = ctrlb; - if (!first) { - first = desc; - } else { - /* inform the HW lli about chaining */ - prev->lli.dscr = desc->txd.phys; - /* insert the link descriptor to the LD ring */ - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; + atc_desc_chain(&first, &prev, desc); total_len += len; } break; -- cgit v1.1 From c8fcba600c46c5d7667ec230b1d9ce3ce5859f9c Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:35 +0900 Subject: pch_dma: fix dma direction issue for ML7213 IOH video-in Currently, even-channel number is set as tx direction and odd is set as rx. However, though video-in uses ch6, the direction is not tx but rx. This patch sets video-in's DMA direction correctly. Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 6eebc62..d28c47a 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -478,7 +478,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) spin_unlock_bh(&pd_chan->lock); pdc_enable_irq(chan, 1); - pdc_set_dir(chan); return pd_chan->descs_allocated; } @@ -561,6 +560,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, else return NULL; + pd_chan->dir = direction; + pdc_set_dir(chan); + for_each_sg(sgl, sg, sg_len, i) { desc = pdc_desc_get(pd_chan); @@ -850,8 +852,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev, pd_chan->membase = ®s->desc[i]; - pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - spin_lock_init(&pd_chan->lock); INIT_LIST_HEAD(&pd_chan->active_list); -- cgit v1.1 From 08645fdc7bab4564f7dfd07525da8a1761f8f106 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:36 +0900 Subject: pch_dma: modify for checkpatch Fix checkpatch warnings. 
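The warnings addressed here appear to be line-length ones; the fix wraps the long inline declarations in the kernel's usual way, as in this example taken from the hunk below:

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}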
Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index d28c47a..afd6fba0 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -138,7 +138,8 @@ struct pch_dma { #define dma_writel(pd, name, val) \ writel((val), (pd)->membase + PCH_DMA_##name) -static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) +static inline +struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) { return container_of(txd, struct pch_dma_desc, txd); } @@ -163,13 +164,15 @@ static inline struct device *chan2parent(struct dma_chan *chan) return chan->dev->device.parent; } -static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) +static inline +struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) { return list_first_entry(&pd_chan->active_list, struct pch_dma_desc, desc_node); } -static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) +static inline +struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) { return list_first_entry(&pd_chan->queue, struct pch_dma_desc, desc_node); -- cgit v1.1 From 60092d0bde4c8741198da4a69b693d3709385bf1 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:37 +0900 Subject: pch_dma: Fix DMA setting issue Currently, Direct-Start mode(*) is enabled. Our IOH's devices must not use this mode. This causes unexpected behavior. This patch deletes Direct-Start setting. (*) This mode is used in order for CPU to generate the DMA request. Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index afd6fba0..4e46672 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -254,9 +254,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan) static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) { - struct pch_dma *pd = to_pd(pd_chan->chan.device); - u32 val; - if (!pdc_is_idle(pd_chan)) { dev_err(chan2dev(&pd_chan->chan), "BUG: Attempt to start non-idle channel\n"); @@ -282,10 +279,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) channel_writel(pd_chan, NEXT, desc->txd.phys); pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); } - - val = dma_readl(pd, CTL2); - val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); - dma_writel(pd, CTL2, val); } static void pdc_chain_complete(struct pch_dma_chan *pd_chan, -- cgit v1.1 From 194f5f2706c7472f9c6bb2d17fa788993606581f Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:38 +0900 Subject: pch_dma: Support I2S for ML7213 IOH Support I2S device for ML7213 IOH Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 62 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 4e46672..2edcc9c 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -77,10 +77,10 @@ struct pch_dma_regs { u32 dma_ctl0; u32 dma_ctl1; u32 dma_ctl2; - u32 reserved1; + u32 dma_ctl3; u32 dma_sts0; u32 dma_sts1; - u32 reserved2; + u32 dma_sts2; u32 reserved3; struct pch_dma_desc_regs desc[MAX_CHAN_NR]; }; @@ -130,6 +130,7 @@ struct pch_dma { #define PCH_DMA_CTL0 0x00 #define PCH_DMA_CTL1 0x04 #define PCH_DMA_CTL2 
0x08 +#define PCH_DMA_CTL3 0x0C #define PCH_DMA_STS0 0x10 #define PCH_DMA_STS1 0x14 @@ -202,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan) struct pch_dma *pd = to_pd(chan->device); u32 val; - val = dma_readl(pd, CTL0); + if (chan->chan_id < 8) { + val = dma_readl(pd, CTL0); - if (pd_chan->dir == DMA_TO_DEVICE) - val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + - DMA_CTL0_DIR_SHIFT_BITS); - else - val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + - DMA_CTL0_DIR_SHIFT_BITS)); + if (pd_chan->dir == DMA_TO_DEVICE) + val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + + DMA_CTL0_DIR_SHIFT_BITS); + else + val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + + DMA_CTL0_DIR_SHIFT_BITS)); + + dma_writel(pd, CTL0, val); + } else { + int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ + val = dma_readl(pd, CTL3); - dma_writel(pd, CTL0, val); + if (pd_chan->dir == DMA_TO_DEVICE) + val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + + DMA_CTL0_DIR_SHIFT_BITS); + else + val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + + DMA_CTL0_DIR_SHIFT_BITS)); + + dma_writel(pd, CTL3, val); + } dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", chan->chan_id, val); @@ -222,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) struct pch_dma *pd = to_pd(chan->device); u32 val; - val = dma_readl(pd, CTL0); + if (chan->chan_id < 8) { + val = dma_readl(pd, CTL0); - val &= ~(DMA_CTL0_MODE_MASK_BITS << - (DMA_CTL0_BITS_PER_CH * chan->chan_id)); - val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); + val &= ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); - dma_writel(pd, CTL0, val); + dma_writel(pd, CTL0, val); + } else { + int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... 
ch11->3 */ + + val = dma_readl(pd, CTL3); + + val &= ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch)); + val |= mode << (DMA_CTL0_BITS_PER_CH * ch); + + dma_writel(pd, CTL3, val); + + } dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", chan->chan_id, val); @@ -701,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd) pd->regs.dma_ctl0 = dma_readl(pd, CTL0); pd->regs.dma_ctl1 = dma_readl(pd, CTL1); pd->regs.dma_ctl2 = dma_readl(pd, CTL2); + pd->regs.dma_ctl3 = dma_readl(pd, CTL3); list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { pd_chan = to_pd_chan(chan); @@ -723,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd) dma_writel(pd, CTL0, pd->regs.dma_ctl0); dma_writel(pd, CTL1, pd->regs.dma_ctl1); dma_writel(pd, CTL2, pd->regs.dma_ctl2); + dma_writel(pd, CTL3, pd->regs.dma_ctl3); list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { pd_chan = to_pd_chan(chan); @@ -925,6 +955,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 +#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 static const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, @@ -932,6 +963,7 @@ static const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ { 0, }, }; -- cgit v1.1 From c0dfc04ac96847913a791f5459f4ac83a81a4745 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:39 +0900 Subject: pch_dma: Support new device ML7223 IOH Support new device OKI SEMICONDUCTOR ML7223 IOH(Input/Output Hub). The ML7223 IOH is for MP(Media Phone) use. The ML7223 is companion chip for Intel Atom E6xx series. The ML7223 is completely compatible for Intel EG20T PCH. Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 12 +++++++----- drivers/dma/pch_dma.c | 8 ++++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a572600..25cf327 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -200,16 +200,18 @@ config PL330_DMA platform_data for a dma-pl330 device. config PCH_DMA - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support" + tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" depends on PCI && X86 select DMA_ENGINE help Enable support for Intel EG20T PCH DMA engine. - This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/ - Output Hub) which is for IVI(In-Vehicle Infotainment) use. - ML7213 is companion chip for Intel Atom E6xx series. - ML7213 is completely compatible for Intel EG20T PCH. + This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ + Output Hub), ML7213 and ML7223. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is + for MP(Media Phone) use. + ML7213/ML7223 is companion chip for Intel Atom E6xx series. + ML7213/ML7223 is completely compatible for Intel EG20T PCH. 
config IMX_SDMA tristate "i.MX SDMA support" diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 2edcc9c..083e698 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -956,6 +956,10 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 #define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 +#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B +#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E +#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 +#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B static const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, @@ -964,6 +968,10 @@ static const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ { 0, }, }; -- cgit v1.1

From eb8590b504caacb029dea4540e0b0dcc98da4381 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Mon, 9 May 2011 16:09:40 +0900 Subject: pch_dma: modify pci device table definition

Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 083e698..ff5b38f 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -961,7 +961,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B -static const struct pci_device_id pch_dma_id_table[] = { +DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ -- cgit v1.1

From de7a2f9f7b6f5b48d8531ff4c9c9b95cab8a8ce8 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Mon, 9 May 2011 18:11:37 +0200 Subject: dmaengine: at_hdmac: pause: no need to wait for FIFO empty

With the addition of the "pause" feature, an active wait was introduced to check the "FIFO empty" event. This event did not always happen, so a timeout condition was needed. But, in some cases, this event depends on the peripheral connected to the paused channel: the FIFO only becomes empty if the peripheral consumes data. The timeout is pretty difficult to evaluate. Moreover, this check is not needed. In conclusion, it seems sensible to entirely remove the checking of "FIFO empty" status when pausing.
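After this removal, the pause path reduces to suspending the channel and flagging it (reconstructed from the hunk below):

	if (cmd == DMA_PAUSE) {
		spin_lock_bh(&atchan->lock);
		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));	/* suspend the channel */
		set_bit(ATC_IS_PAUSED, &atchan->status);
		spin_unlock_bh(&atchan->lock);
	}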
Signed-off-by: Nicolas Ferre [commit msg edited for grammar] Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 2c6bc3a..3134003 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -933,25 +933,9 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); if (cmd == DMA_PAUSE) { - int pause_timeout = 1000; - spin_lock_bh(&atchan->lock); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); - - /* wait for FIFO to be empty */ - while (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) { - if (pause_timeout-- > 0) { - /* the FIFO can only drain if the peripheral - * is still requesting data: - * -> timeout if it is not the case. */ - dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); - spin_unlock_bh(&atchan->lock); - return -ETIMEDOUT; - } - cpu_relax(); - } - set_bit(ATC_IS_PAUSED, &atchan->status); spin_unlock_bh(&atchan->lock); -- cgit v1.1

From 5fedefb87bd0a64281d28edd295f29e3b989d78c Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 15 Apr 2011 16:03:35 +0530 Subject: dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called

If dmaengine_terminate_all() is called for a DMA channel, it doesn't make much sense to call the registered callback routine; in case of success or failure, however, it must be called.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index b15c32c..265d43c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -195,18 +195,21 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) /*----------------------------------------------------------------------*/ static void -dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) +dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, + bool callback_required) { - dma_async_tx_callback callback; - void *param; + dma_async_tx_callback callback = NULL; + void *param = NULL; struct dma_async_tx_descriptor *txd = &desc->txd; struct dw_desc *child; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); dwc->completed = txd->cookie; - callback = txd->callback; - param = txd->callback_param; + if (callback_required) { + callback = txd->callback; + param = txd->callback_param; + } dwc_sync_desc_for_cpu(dwc, desc); @@ -238,11 +241,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) } } - /* - * The API requires that no submissions are done from a - * callback, so we don't need to drop the lock here - */ - if (callback) + if (callback_required && callback) callback(param); } @@ -272,7 +271,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) } list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc); + dwc_descriptor_complete(dwc, desc, true); } static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) @@ -322,7 +321,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) * No descriptors so far seem to be in progress, i.e. * this one must be done.
*/ - dwc_descriptor_complete(dwc, desc); + dwc_descriptor_complete(dwc, desc, true); } dev_err(chan2dev(&dwc->chan), @@ -384,7 +383,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_dump_lli(dwc, &child->lli); /* Pretend the descriptor completed successfully */ - dwc_descriptor_complete(dwc, bad_desc); + dwc_descriptor_complete(dwc, bad_desc, true); } /* --------------------- Cyclic DMA API extensions -------------------- */ @@ -831,7 +830,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc); + dwc_descriptor_complete(dwc, desc, false); return 0; } -- cgit v1.1

From abf53902dcc6d44d2e06b09817fa67857aa686fe Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 15 Apr 2011 16:03:35 +0530 Subject: dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS

If transfer status is !=DMA_SUCCESS, return the total transfer length as residue, instead of zero.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 265d43c..f209ff8 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -860,7 +860,11 @@ dwc_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); } - dma_set_tx_state(txstate, last_complete, last_used, 0); + if (ret != DMA_SUCCESS) + dma_set_tx_state(txstate, last_complete, last_used, + dwc_first_active(dwc)->len); + else + dma_set_tx_state(txstate, last_complete, last_used, 0); return ret; } -- cgit v1.1

From 69dc14b51c1aad9d82afd8f96bf4e4835089bffc Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 18 Apr 2011 14:54:56 +0530 Subject: dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT

If the len passed in an sg for slave_sg transfers is greater than DWC_MAX_COUNT, then the driver programs the controller incorrectly. This patch adds code to handle this situation by allocating more than one desc for the same sg.
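The splitting rule is straightforward; a sketch of the arithmetic for one scatterlist entry (illustrative, mirroring the hunks below — one hardware descriptor can move at most DWC_MAX_COUNT block transfers of (1 << mem_width) bytes each):

	while (len) {
		u32 dlen;

		if ((len >> mem_width) > DWC_MAX_COUNT)
			dlen = DWC_MAX_COUNT << mem_width;
		else
			dlen = len;

		/* ... program one lli covering [mem, mem + dlen) ... */
		mem += dlen;
		len -= dlen;
	}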
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 65 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index f209ff8..a9755e3 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -693,9 +693,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, reg = dws->tx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; - u32 len; - u32 mem; + u32 len, dlen, mem; + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + +slave_sg_todev_fill_desc: desc = dwc_desc_get(dwc); if (!desc) { dev_err(chan2dev(chan), @@ -703,16 +709,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, goto err_desc_get; } - mem = sg_phys(sg); - len = sg_dma_len(sg); - mem_width = 2; - if (unlikely(mem & 3 || len & 3)) - mem_width = 0; - desc->lli.sar = mem; desc->lli.dar = reg; desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); - desc->lli.ctlhi = len >> mem_width; + if ((len >> mem_width) > DWC_MAX_COUNT) { + dlen = DWC_MAX_COUNT << mem_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + + desc->lli.ctlhi = dlen >> mem_width; if (!first) { first = desc; @@ -726,7 +735,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, &first->tx_list); } prev = desc; - total_len += len; + total_len += dlen; + + if (len) + goto slave_sg_todev_fill_desc; } break; case DMA_FROM_DEVICE: @@ -739,15 +751,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, reg = dws->rx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; - u32 len; - u32 mem; - - desc = dwc_desc_get(dwc); - if (!desc) { - dev_err(chan2dev(chan), - "not enough descriptors available\n"); - goto err_desc_get; - } + u32 len, dlen, mem; mem = sg_phys(sg); len = sg_dma_len(sg); @@ -755,10 +759,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(mem & 3 || len & 3)) mem_width = 0; +slave_sg_fromdev_fill_desc: + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(chan2dev(chan), + "not enough descriptors available\n"); + goto err_desc_get; + } + desc->lli.sar = reg; desc->lli.dar = mem; desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); - desc->lli.ctlhi = len >> reg_width; + if ((len >> reg_width) > DWC_MAX_COUNT) { + dlen = DWC_MAX_COUNT << reg_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + desc->lli.ctlhi = dlen >> reg_width; if (!first) { first = desc; @@ -772,7 +792,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, &first->tx_list); } prev = desc; - total_len += len; + total_len += dlen; + + if (len) + goto slave_sg_fromdev_fill_desc; } break; default: -- cgit v1.1

From 69cea5a00d3135677939fce1fefe54ed522055a0 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 15 Apr 2011 16:03:35 +0530 Subject: dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback

dmaengine routines can be called from interrupt context and with interrupts disabled, whereas spin_unlock_bh can't be called from such contexts. So this patch converts all spin_*_bh routines to their irqsave variants. Also, the spin_lock() used in the tasklet is converted to the irqsave variant, as the tasklet can be interrupted, and DMA requests from such interruptions may also take the lock. Now, submissions from callbacks are permitted, as per the dmaengine framework.
So we shouldn't hold any locks while calling callbacks. Since the locks were taken by parent routines, releasing them just before calling the callbacks wouldn't be clean; instead, locks are now taken inside each routine, wherever they are required, and dwc_descriptor_complete is always called without holding locks. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 116 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 77 insertions(+), 39 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index a9755e3..442b98b 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -93,8 +93,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) struct dw_desc *desc, *_desc; struct dw_desc *ret = NULL; unsigned int i = 0; + unsigned long flags; - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { if (async_tx_test_ack(&desc->txd)) { list_del(&desc->desc_node); @@ -104,7 +105,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); i++; } - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); @@ -130,12 +131,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) */ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) { + unsigned long flags; + if (desc) { struct dw_desc *child; dwc_sync_desc_for_cpu(dwc, desc); - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dwc->chan), "moving child desc %p to freelist\n", @@ -143,7 +146,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) list_splice_init(&desc->tx_list, &dwc->free_list); dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &dwc->free_list); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); } } @@ -202,9 +205,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, void *param = NULL; struct dma_async_tx_descriptor *txd = &desc->txd; struct dw_desc *child; + unsigned long flags; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); + spin_lock_irqsave(&dwc->lock, flags); dwc->completed = txd->cookie; if (callback_required) { callback = txd->callback; @@ -241,6 +246,8 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, } } + spin_unlock_irqrestore(&dwc->lock, flags); + if (callback_required && callback) callback(param); } @@ -249,7 +256,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; LIST_HEAD(list); + unsigned long flags; + spin_lock_irqsave(&dwc->lock, flags); if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "BUG: XFER bit set, but channel not idle!\n"); @@ -270,6 +279,8 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_dostart(dwc, dwc_first_active(dwc)); } + spin_unlock_irqrestore(&dwc->lock, flags); + list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc, true); } @@ -280,7 +291,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) struct dw_desc *desc, *_desc; struct dw_desc *child; u32 status_xfer; + unsigned long flags; + spin_lock_irqsave(&dwc->lock, flags); /*
* Clear block interrupt flag before scanning so that we don't * miss any, and read LLP before RAW_XFER to ensure it is @@ -293,35 +306,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) if (status_xfer & dwc->mask) { /* Everything we've submitted is done */ dma_writel(dw, CLEAR.XFER, dwc->mask); + spin_unlock_irqrestore(&dwc->lock, flags); + dwc_complete_all(dw, dwc); return; } - if (list_empty(&dwc->active_list)) + if (list_empty(&dwc->active_list)) { + spin_unlock_irqrestore(&dwc->lock, flags); return; + } dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { /* check first descriptors addr */ - if (desc->txd.phys == llp) + if (desc->txd.phys == llp) { + spin_unlock_irqrestore(&dwc->lock, flags); return; + } /* check first descriptors llp */ - if (desc->lli.llp == llp) + if (desc->lli.llp == llp) { /* This one is currently in progress */ + spin_unlock_irqrestore(&dwc->lock, flags); return; + } list_for_each_entry(child, &desc->tx_list, desc_node) - if (child->lli.llp == llp) + if (child->lli.llp == llp) { /* Currently in progress */ + spin_unlock_irqrestore(&dwc->lock, flags); return; + } /* * No descriptors so far seem to be in progress, i.e. * this one must be done. */ + spin_unlock_irqrestore(&dwc->lock, flags); dwc_descriptor_complete(dwc, desc, true); + spin_lock_irqsave(&dwc->lock, flags); } dev_err(chan2dev(&dwc->chan), @@ -336,6 +361,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) list_move(dwc->queue.next, &dwc->active_list); dwc_dostart(dwc, dwc_first_active(dwc)); } + spin_unlock_irqrestore(&dwc->lock, flags); } static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) @@ -350,9 +376,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *bad_desc; struct dw_desc *child; + unsigned long flags; dwc_scan_descriptors(dw, dwc); + spin_lock_irqsave(&dwc->lock, flags); + /* * The descriptor currently at the head of the active list is * borked. Since we don't have any way to report errors, we'll @@ -382,6 +411,8 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) list_for_each_entry(child, &bad_desc->tx_list, desc_node) dwc_dump_lli(dwc, &child->lli); + spin_unlock_irqrestore(&dwc->lock, flags); + /* Pretend the descriptor completed successfully */ dwc_descriptor_complete(dwc, bad_desc, true); } @@ -406,6 +437,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, u32 status_block, u32 status_err, u32 status_xfer) { + unsigned long flags; + if (status_block & dwc->mask) { void (*callback)(void *param); void *callback_param; @@ -416,11 +449,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, callback = dwc->cdesc->period_callback; callback_param = dwc->cdesc->period_callback_param; - if (callback) { - spin_unlock(&dwc->lock); + + if (callback) callback(callback_param); - spin_lock(&dwc->lock); - } } /* @@ -434,6 +465,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " "interrupt, stopping DMA transfer\n", status_xfer ? 
"xfer" : "error"); + + spin_lock_irqsave(&dwc->lock, flags); + dev_err(chan2dev(&dwc->chan), " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", channel_readl(dwc, SAR), @@ -457,6 +491,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, for (i = 0; i < dwc->cdesc->periods; i++) dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); } } @@ -480,7 +516,6 @@ static void dw_dma_tasklet(unsigned long data) for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; - spin_lock(&dwc->lock); if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) dwc_handle_cyclic(dw, dwc, status_block, status_err, status_xfer); @@ -488,7 +523,6 @@ static void dw_dma_tasklet(unsigned long data) dwc_handle_error(dw, dwc); else if ((status_block | status_xfer) & (1 << i)) dwc_scan_descriptors(dw, dwc); - spin_unlock(&dwc->lock); } /* @@ -543,8 +577,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) struct dw_desc *desc = txd_to_dw_desc(tx); struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); dma_cookie_t cookie; + unsigned long flags; - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); cookie = dwc_assign_cookie(dwc, desc); /* @@ -564,7 +599,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) list_add_tail(&desc->desc_node, &dwc->queue); } - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); return cookie; } @@ -826,6 +861,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; + unsigned long flags; LIST_HEAD(list); /* Only supports DMA_TERMINATE_ALL */ @@ -838,7 +874,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * channel. We still have to poll the channel enable bit due * to AHB/HSB limitations. */ - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); channel_clear_bit(dw, CH_EN, dwc->mask); @@ -849,7 +885,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, list_splice_init(&dwc->queue, &list); list_splice_init(&dwc->active_list, &list); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) @@ -873,9 +909,7 @@ dwc_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { - spin_lock_bh(&dwc->lock); dwc_scan_descriptors(to_dw_dma(chan->device), dwc); - spin_unlock_bh(&dwc->lock); last_complete = dwc->completed; last_used = chan->cookie; @@ -896,10 +930,8 @@ static void dwc_issue_pending(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - spin_lock_bh(&dwc->lock); if (!list_empty(&dwc->queue)) dwc_scan_descriptors(to_dw_dma(chan->device), dwc); - spin_unlock_bh(&dwc->lock); } static int dwc_alloc_chan_resources(struct dma_chan *chan) @@ -911,6 +943,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) int i; u32 cfghi; u32 cfglo; + unsigned long flags; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); @@ -948,16 +981,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) * doesn't mean what you think it means), and status writeback. 
*/ - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); i = dwc->descs_allocated; while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); if (!desc) { dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); break; } @@ -969,7 +1002,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) sizeof(desc->lli), DMA_TO_DEVICE); dwc_desc_put(dwc, desc); - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); i = ++dwc->descs_allocated; } @@ -978,7 +1011,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) channel_set_bit(dw, MASK.BLOCK, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); dev_dbg(chan2dev(chan), "alloc_chan_resources allocated %d descriptors\n", i); @@ -991,6 +1024,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; + unsigned long flags; LIST_HEAD(list); dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", @@ -1001,7 +1035,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) BUG_ON(!list_empty(&dwc->queue)); BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); list_splice_init(&dwc->free_list, &list); dwc->descs_allocated = 0; @@ -1010,7 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) channel_clear_bit(dw, MASK.BLOCK, dwc->mask); channel_clear_bit(dw, MASK.ERROR, dwc->mask); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); list_for_each_entry_safe(desc, _desc, &list, desc_node) { dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); @@ -1035,13 +1069,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); return -ENODEV; } - spin_lock(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); /* assert channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { @@ -1054,7 +1089,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) channel_readl(dwc, LLP), channel_readl(dwc, CTL_HI), channel_readl(dwc, CTL_LO)); - spin_unlock(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); return -EBUSY; } @@ -1069,7 +1104,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) channel_set_bit(dw, CH_EN, dwc->mask); - spin_unlock(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); return 0; } @@ -1085,14 +1120,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; - spin_lock(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); - spin_unlock(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); } EXPORT_SYMBOL(dw_dma_cyclic_stop); @@ -1121,17 +1157,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, unsigned int reg_width; unsigned int periods; unsigned int i; + unsigned long flags; - spin_lock_bh(&dwc->lock); + 
spin_lock_irqsave(&dwc->lock, flags); if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); dev_dbg(chan2dev(&dwc->chan), "queue and/or active list are not empty\n"); return ERR_PTR(-EBUSY); } was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); if (was_cyclic) { dev_dbg(chan2dev(&dwc->chan), "channel already prepared for cyclic DMA\n"); @@ -1245,13 +1282,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan) struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_cyclic_desc *cdesc = dwc->cdesc; int i; + unsigned long flags; dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); if (!cdesc) return; - spin_lock_bh(&dwc->lock); + spin_lock_irqsave(&dwc->lock, flags); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); @@ -1261,7 +1299,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); - spin_unlock_bh(&dwc->lock); + spin_unlock_irqrestore(&dwc->lock, flags); for (i = 0; i < cdesc->periods; i++) dwc_desc_put(dwc, cdesc->desc[i]); -- cgit v1.1 From a7c57cf7d4327c41510f8cbf45b1b970e02c34f8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 19 Apr 2011 08:31:32 +0800 Subject: dmaengine/dw_dmac: implement pause and resume in dwc_control Some peripherals, like amba-pl011, need pause to be implemented in DMA controller drivers. This also makes dwc_tx_status() return the correct status when the channel is paused. Signed-off-by: Linus Walleij Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 59 ++++++++++++++++++++++++++++++---------------- drivers/dma/dw_dmac_regs.h | 1 + 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 442b98b..eec675b 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -862,34 +862,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; unsigned long flags; + u32 cfglo; LIST_HEAD(list); - /* Only supports DMA_TERMINATE_ALL */ - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; + if (cmd == DMA_PAUSE) { + spin_lock_irqsave(&dwc->lock, flags); - /* - * This is only called when something went wrong elsewhere, so - * we don't really care about the data. Just disable the - * channel. We still have to poll the channel enable bit due - * to AHB/HSB limitations.
- */ - spin_lock_irqsave(&dwc->lock, flags); + cfglo = channel_readl(dwc, CFG_LO); + channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) + cpu_relax(); - channel_clear_bit(dw, CH_EN, dwc->mask); + dwc->paused = true; + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_RESUME) { + if (!dwc->paused) + return 0; - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); + spin_lock_irqsave(&dwc->lock, flags); - /* active_list entries will end up before queued entries */ - list_splice_init(&dwc->queue, &list); - list_splice_init(&dwc->active_list, &list); + cfglo = channel_readl(dwc, CFG_LO); + channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); + dwc->paused = false; - spin_unlock_irqrestore(&dwc->lock, flags); + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_TERMINATE_ALL) { + spin_lock_irqsave(&dwc->lock, flags); - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, false); + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + dwc->paused = false; + + /* active_list entries will end up before queued entries */ + list_splice_init(&dwc->queue, &list); + list_splice_init(&dwc->active_list, &list); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, false); + } else + return -ENXIO; return 0; } @@ -923,6 +939,9 @@ dwc_tx_status(struct dma_chan *chan, else dma_set_tx_state(txstate, last_complete, last_used, 0); + if (dwc->paused) + return DMA_PAUSED; + return ret; } diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 720f821..c968597 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -138,6 +138,7 @@ struct dw_dma_chan { void __iomem *ch_regs; u8 mask; u8 priority; + bool paused; spinlock_t lock; -- cgit v1.1 From a0eb221a446f2f6c988430f0b0a13f74b7c2b799 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 18 May 2011 14:18:57 +0200 Subject: dmaengine: move link order Move the dmaengine subsystem up early in the drivers Makefile so DMA is made available early to all drivers, just like e.g. regulators. Now even regulators can use DMA on the same initlevel. As a result we can bump the ste_dma40 and coh901318 dmaengine drivers down one initlevel to subsys_init(). Signed-off-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/Makefile | 4 +++- drivers/dma/coh901318.c | 2 +- drivers/dma/ste_dma40.c | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/Makefile b/drivers/Makefile index 3f135b6..e68423d 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/ # was used and do nothing if so obj-$(CONFIG_PNP) += pnp/ obj-$(CONFIG_ARM_AMBA) += amba/ +# Many drivers will want to use DMA so this has to be made available +# really early. 
+obj-$(CONFIG_DMA_ENGINE) += dma/ obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_XEN) += xen/ @@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/ obj-y += lguest/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ -obj-$(CONFIG_DMA_ENGINE) += dma/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 00deabd..b5a3189 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1610,7 +1610,7 @@ int __init coh901318_init(void) { return platform_driver_probe(&coh901318_driver, coh901318_probe); } -arch_initcall(coh901318_init); +subsys_initcall(coh901318_init); void __exit coh901318_exit(void) { diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 65d2535..5d054bb 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2962,4 +2962,4 @@ static int __init stedma40_init(void) { return platform_driver_probe(&d40_driver, d40_probe); } -arch_initcall(stedma40_init); +subsys_initcall(stedma40_init); -- cgit v1.1 From aecb7b64dd9e2512c7a4c7e61dd781415d3dac5a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 24 May 2011 14:04:09 +0530 Subject: dmaengine/dw_dmac: Update maintainer-ship Nobody is currently maintaining dw_dmac. We are using dw_dmac for SPEAr13xx and are currently maintaining it. After discussing with Vinod, I am sending this patch to update the maintainership of dw_dmac. Signed-off-by: Viresh Kumar Acked-by: Havard Skinnemoen Signed-off-by: Vinod Koul --- MAINTAINERS | 7 +++++++ drivers/dma/dw_dmac.c | 2 ++ drivers/dma/dw_dmac_regs.h | 1 + include/linux/dw_dmac.h | 1 + 4 files changed, 11 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 6b4b9cd..8f44130 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5401,6 +5401,13 @@ L: linux-serial@vger.kernel.org S: Maintained F: drivers/tty/serial +SYNOPSYS DESIGNWARE DMAC DRIVER +M: Viresh Kumar +S: Maintained +F: include/linux/dw_dmac.h +F: drivers/dma/dw_dmac_regs.h +F: drivers/dma/dw_dmac.c + TIMEKEEPING, NTP M: John Stultz M: Thomas Gleixner diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index eec675b..efd836d 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -3,6 +3,7 @@ * AVR32 systems.) * * Copyright (C) 2007-2008 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1575,3 +1576,4 @@ module_exit(dw_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); MODULE_AUTHOR("Haavard Skinnemoen "); +MODULE_AUTHOR("Viresh Kumar "); diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index c968597..c341951 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -2,6 +2,7 @@ * Driver for the Synopsys DesignWare AHB DMA Controller * * Copyright (C) 2005-2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 6998d93..4bfe0a2 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -3,6 +3,7 @@ * AVR32 systems.)
* * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as -- cgit v1.1 From 46b2903c05b248ed78304113ecfba368b4c55def Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 25 May 2011 14:49:20 -0700 Subject: dmaengine: Add API documentation for slave dma usage Signed-off-by: Vinod Koul Signed-off-by: Dan Williams --- Documentation/dmaengine.txt | 97 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 96 insertions(+), 1 deletion(-) diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt index 0c1c2f6..5a0cb1e 100644 --- a/Documentation/dmaengine.txt +++ b/Documentation/dmaengine.txt @@ -1 +1,96 @@ -See Documentation/crypto/async-tx-api.txt + DMA Engine API Guide + ==================== + + Vinod Koul +NOTE: For DMA Engine usage in async_tx please see: + Documentation/crypto/async-tx-api.txt + + +Below is a guide to device driver writers on how to use the Slave-DMA API of the +DMA Engine. This is applicable for slave DMA usage only. + +The slave DMA usage consists of the following steps: +1. Allocate a DMA slave channel +2. Set slave and controller specific parameters +3. Get a descriptor for transaction +4. Submit the transaction and wait for callback notification + +1. Allocate a DMA slave channel +Channel allocation is slightly different in the slave DMA context: client +drivers typically need a channel from a particular DMA controller only, and +in some cases even a specific channel is desired. To request a channel, the +dma_request_channel() API is used. + +Interface: +struct dma_chan *dma_request_channel(dma_cap_mask_t mask, + dma_filter_fn filter_fn, + void *filter_param); +where dma_filter_fn is defined as: +typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); + +When the optional 'filter_fn' parameter is set to NULL dma_request_channel +simply returns the first channel that satisfies the capability mask. Otherwise, +when the mask parameter is insufficient for specifying the necessary channel, +the filter_fn routine can be used to disposition the available channels in the +system. The filter_fn routine is called once for each free channel in the +system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags +that channel to be the return value from dma_request_channel. A channel +allocated via this interface is exclusive to the caller, until +dma_release_channel() is called. + +2. Set slave and controller specific parameters +The next step is always to pass some specific information to the DMA driver. Most of +the generic information which a slave DMA can use is in struct dma_slave_config. +It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA +burst lengths etc. If some DMA controllers have more parameters to be sent, then +they should try to embed struct dma_slave_config in their controller-specific +structure. That gives the client the flexibility to pass more parameters, if +required. + +Interface: +int dmaengine_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) + +3. Get a descriptor for transaction +For slave usage the various modes of slave transfers supported by the +DMA-engine are: +slave_sg - DMA a list of scatter gather buffers from/to a peripheral +dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the + operation is explicitly stopped.
+The non-NULL return of this transfer API represents a "descriptor" for the given +transaction. + +Interface: +struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)( + struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags); +struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_data_direction direction); + +4. Submit the transaction and wait for callback notification +To get the transaction scheduled by the dma device, the "descriptor" returned in (3) above needs to be submitted. +To tell the dma driver that a transaction is ready to be serviced, the descriptor->submit() callback needs to be invoked. This chains the descriptor to the pending queue. +The transactions in the pending queue can be activated by calling the issue_pending API. If the channel is idle then the first transaction in the queue is started and subsequent ones are queued up. +On completion of the DMA operation the next in queue is submitted and a tasklet triggered. The tasklet would then call the client driver completion callback routine for notification, if set. +Interface: +void dma_async_issue_pending(struct dma_chan *chan); + +============================================================================== + +Additional usage notes for dma driver writers +1/ Although the DMA engine specifies that completion callback routines cannot submit any new operations, typically for slave DMA a subsequent transaction may not be available for submission prior to the callback routine being called, so this requirement does not apply to DMA-slave devices. But they should take care to drop the spin-lock they might be holding before calling the callback routine -- cgit v1.1 From bc9af76b1e87e4f925f97368dae6c22266922e8b Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 25 May 2011 16:56:34 +0530 Subject: dmaengine: add TODO items for future work on dma drivers Signed-off-by: Vinod Koul Signed-off-by: Dan Williams --- drivers/dma/TODO | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 drivers/dma/TODO diff --git a/drivers/dma/TODO b/drivers/dma/TODO new file mode 100644 index 0000000..a4af858 --- /dev/null +++ b/drivers/dma/TODO @@ -0,0 +1,14 @@ +TODO for slave dma + +1. Move remaining drivers to use new slave interface +2. Remove old slave pointer mechanism +3. Make issue_pending start the transaction in the below drivers + - mpc512x_dma + - imx-dma + - imx-sdma + - mxs-dma.c + - dw_dmac + - intel_mid_dma + - ste_dma40 +4. Check other subsystems for dma drivers and merge/move to dmaengine +5. Remove dma_slave_config's dma direction.
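Pulling together the four steps of the slave-DMA guide above, a client driver's receive path looks roughly like the hedged sketch below. It uses only interfaces the guide names plus desc->tx_submit(); the filter, FIFO address, bus width and function names are invented placeholders, and error handling is trimmed to the essentials.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Placeholder filter: a real one would match a controller-specific request line. */
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	return true;
}

static int my_start_rx(struct scatterlist *sgl, unsigned int sg_len,
		       void (*done)(void *), void *done_arg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= 0xd0000000,			/* hypothetical RX FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	/* 1. allocate a DMA slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return -ENODEV;

	/* 2. set slave and controller specific parameters */
	dmaengine_slave_config(chan, &cfg);

	/* 3. get a descriptor for the transaction */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* 4. submit, then kick the pending queue */
	desc->callback = done;
	desc->callback_param = done_arg;
	desc->tx_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}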
-- cgit v1.1 From 5dbd05d46fb7d849570c8fa09d48591aa7ce1766 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 25 May 2011 23:36:30 +0530 Subject: maintainers: add dma engine tree details Signed-off-by: Vinod Koul Signed-off-by: Dan Williams --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 8f44130..4435893 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2169,6 +2169,8 @@ M: Dan Williams S: Supported F: drivers/dma/ F: include/linux/dma* +T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git +T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma) DME1737 HARDWARE MONITOR DRIVER M: Juerg Haefliger -- cgit v1.1 From 19d78a61be6dd707dcec298c486303d4ba2c840a Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Fri, 6 May 2011 10:33:44 -0500 Subject: x86: poll waiting for I/OAT DMA channel status For certain system configurations a 5 usec udelay before checking I/OAT DMA channel status is sometimes not sufficient, resulting in a false failure status and unnecessary freeing of channel resources. Conversely, for many configurations 5 usec is longer than necessary. Loop for up to 20 usec waiting for successful status before failing. Signed-off-by: Dimitri Sivanich Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index effd140..54d1a3c 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -507,6 +507,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) struct ioat_ring_ent **ring; u64 status; int order; + int i = 0; /* have we already been set up? */ if (ioat->ring) @@ -547,8 +548,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat2_start_null_desc(ioat); /* check that we got off the ground */ - udelay(5); - status = ioat_chansts(chan); + do { + udelay(1); + status = ioat_chansts(chan); + } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); + if (is_ioat_active(status) || is_ioat_idle(status)) { set_bit(IOAT_RUN, &chan->state); return 1 << ioat->alloc_order; -- cgit v1.1
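The polling pattern in the last hunk generalizes to a small bounded-wait helper; here is a hedged sketch (the helper name is invented; ioat_chansts(), is_ioat_active() and is_ioat_idle() are the driver's existing accessors, and udelay() comes from <linux/delay.h>):

/* Poll channel status in 1 usec steps, for at most max_us usec. */
static bool ioat2_wait_for_start(struct ioat_chan_common *chan, u64 *status,
				 unsigned int max_us)
{
	unsigned int i;

	for (i = 0; i < max_us; i++) {
		udelay(1);
		*status = ioat_chansts(chan);
		if (is_ioat_active(*status) || is_ioat_idle(*status))
			return true;
	}
	return false;
}

With max_us = 20 this matches the patch's behavior: it gives up after roughly 20 usec but returns as soon as the channel reports active or idle, so well-behaved configurations pay far less than the old fixed 5 usec delay.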