author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 09:24:48 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 09:24:48 -0800
commit     5115f3c19d17851aaff5a857f55b4a019c908775
tree       0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers
parent     c41b3810c09e60664433548c5218cc6ece6a8903
parent     17166a3b6e88b93189e6be5f7e1335a3cc4fa965
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
 "This is fairly big pull by my standards as I had missed last merge
  window. So we have the support for device tree for slave-dmaengine,
  large updates to dw_dmac driver from Andy for reusing on different
  architectures. Along with this we have fixes on bunch of the drivers"

Fix up trivial conflicts, usually due to #include line movement next to
each other.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
  Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
  ARM: dts: pl330: Add #dma-cells for generic dma binding support
  DMA: PL330: Register the DMA controller with the generic DMA helpers
  DMA: PL330: Add xlate function
  DMA: PL330: Add new pl330 filter for DT case.
  dma: tegra20-apb-dma: remove unnecessary assignment
  edma: do not waste memory for dma_mask
  dma: coh901318: set residue only if dma is in progress
  dma: coh901318: avoid unbalanced locking
  dmaengine.h: remove redundant else keyword
  dma: of-dma: protect list write operation by spin_lock
  dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
  dma: of-dma.c: fix memory leakage
  dw_dmac: apply default dma_mask if needed
  dmaengine: ioat - fix spare sparse complain
  dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
  ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
  dw_dmac: add support for Lynxpoint DMA controllers
  dw_dmac: return proper residue value
  dw_dmac: fill individual length of descriptor
  ...
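The headline change is the generic device-tree binding for slave DMA: dma_request_slave_channel(), added to drivers/dma/dmaengine.c in this merge, resolves a channel from the client's DT node via of_dma_request_slave_channel(). A minimal client-side sketch, assuming a hypothetical foo driver whose DT node carries the generic binding (e.g. dmas = <&dmac 5>; dma-names = "tx";):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/*
	 * Resolves through of_dma_request_slave_channel() when
	 * dev->of_node is set; returns NULL if no channel is found.
	 */
	chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!chan)
		return -EPROBE_DEFER;

	/* ... dmaengine_slave_config(), prep and submit descriptors ... */

	dma_release_channel(chan);
	return 0;
}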
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dca/dca-core.c                       5
-rw-r--r--  drivers/dma/Kconfig                         11
-rw-r--r--  drivers/dma/Makefile                         2
-rw-r--r--  drivers/dma/amba-pl08x.c                    10
-rw-r--r--  drivers/dma/at_hdmac.c                      10
-rw-r--r--  drivers/dma/at_hdmac_regs.h                  8
-rw-r--r--  drivers/dma/coh901318.c                      4
-rw-r--r--  drivers/dma/coh901318_lli.c                  2
-rw-r--r--  drivers/dma/dmaengine.c                     21
-rw-r--r--  drivers/dma/dmatest.c                       22
-rw-r--r--  drivers/dma/dw_dmac.c                      523
-rw-r--r--  drivers/dma/dw_dmac_regs.h                  27
-rw-r--r--  drivers/dma/edma.c                          61
-rw-r--r--  drivers/dma/ep93xx_dma.c                     3
-rw-r--r--  drivers/dma/ioat/dma.c                      11
-rw-r--r--  drivers/dma/ioat/dma.h                       1
-rw-r--r--  drivers/dma/ioat/dma_v2.c                  113
-rw-r--r--  drivers/dma/ioat/dma_v3.c                  216
-rw-r--r--  drivers/dma/ioat/hw.h                       11
-rw-r--r--  drivers/dma/ioat/pci.c                      11
-rw-r--r--  drivers/dma/iop-adma.c                      45
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c                  2
-rw-r--r--  drivers/dma/ipu/ipu_irq.c                    1
-rw-r--r--  drivers/dma/mmp_pdma.c                       6
-rw-r--r--  drivers/dma/mv_xor.c                        40
-rw-r--r--  drivers/dma/mxs-dma.c                        8
-rw-r--r--  drivers/dma/of-dma.c                       267
-rw-r--r--  drivers/dma/pch_dma.c                       13
-rw-r--r--  drivers/dma/pl330.c                        101
-rw-r--r--  drivers/dma/sh/shdma-base.c                  3
-rw-r--r--  drivers/dma/sh/shdma.c                       2
-rw-r--r--  drivers/dma/sirf-dma.c                      73
-rw-r--r--  drivers/dma/ste_dma40.c                    491
-rw-r--r--  drivers/dma/ste_dma40_ll.c                  29
-rw-r--r--  drivers/dma/ste_dma40_ll.h                 130
-rw-r--r--  drivers/dma/tegra20-apb-dma.c               55
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c      2
-rw-r--r--  drivers/misc/carma/carma-fpga.c              6
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c                22
39 files changed, 1725 insertions, 643 deletions
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bc6f5faa1e9e..819dfda88236 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
raw_spin_lock_irqsave(&dca_lock, flags);
+ if (list_empty(&dca_domains)) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
list_del(&dca->node);
pci_rc = dca_pci_rc_from_dev(dev);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 40179e749f08..80b69971cf28 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
- depends on ARM_AMBA && EXPERIMENTAL
+ depends on ARM_AMBA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -215,8 +214,8 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config SIRF_DMA
- tristate "CSR SiRFprimaII DMA support"
- depends on ARCH_PRIMA2
+ tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+ depends on ARCH_SIRF
select DMA_ENGINE
help
Enable support for the CSR SiRFprimaII DMA engine.
@@ -328,6 +327,10 @@ config DMA_ENGINE
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_OF
+ def_bool y
+ depends on OF
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 642d96736cf5..488e3ff85b52 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_OF) += of-dma.o
+
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc5791476b..8bad254a498d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,7 +83,7 @@
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
#include "dmaengine.h"
#include "virt-dma.h"
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_dma_chan *plchan)
{
LIST_HEAD(head);
- struct pl08x_txd *txd;
vchan_get_all_descriptors(&plchan->vc, &head);
-
- while (!list_empty(&head)) {
- txd = list_first_entry(&head, struct pl08x_txd, vd.node);
- list_del(&txd->vd.node);
- pl08x_desc_free(&txd->vd);
- }
+ vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
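The pl08x hunk above swaps an open-coded descriptor walk for the virt-dma helper. For reference, vchan_dma_desc_free_list() in drivers/dma/virt-dma.c of this era does essentially the same walk, handing each descriptor to the channel's desc_free callback (paraphrased from memory, not part of this diff):

void vchan_dma_desc_free_list(struct virt_dma_chan *vc,
			      struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
				struct virt_dma_desc, node);

		/* Unlink, then let the driver free its container struct */
		list_del(&vd->node);
		vc->desc_free(vd);
	}
}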
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13a02f4425b0..6e13f262139a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -778,7 +778,7 @@ err:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
- goto err_out;
return 0;
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
return NULL;
}
+ if (unlikely(!is_slave_direction(direction)))
+ goto err_out;
+
if (sconfig->direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
/* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(reg_width, buf_addr,
- period_len, direction))
+ if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
goto err_out;
/* build cyclic linked list */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 116e4adffb08..0eb3c1388667 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
- " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
- lli->saddr, lli->daddr,
- lli->ctrla, lli->ctrlb, lli->dscr);
+ dev_crit(chan2dev(&atchan->chan_common),
+ " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
+ lli->saddr, lli->daddr,
+ lli->ctrla, lli->ctrlb, lli->dscr);
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a2f079aca550..797940e532ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2355,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- /* FIXME: should be conditional on ret != DMA_SUCCESS? */
+ if (ret == DMA_SUCCESS)
+ return ret;
+
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
if (ret == DMA_IN_PROGRESS && cohc->stopped)
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 3e96610e18e2..702112d547c8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
dma_addr_t phy;
if (len == 0)
- goto err;
+ return NULL;
spin_lock(&pool->lock);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44c70a4..242b8c0a3de8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,7 @@
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
- } while (status == DMA_IN_PROGRESS);
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
return status;
}
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+{
+ /* If device-tree is present get slave info from here */
+ if (dev->of_node)
+ return of_dma_request_slave_channel(dev->of_node, name);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d7fba7..a2c8904b63ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+ unsigned int val = min(x, y);
+
+ return val % 2 ? val : val - 1;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
+ struct dma_device *dev;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
smp_rmb();
chan = thread->chan;
+ dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
- src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
- src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
- struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
@@ -526,7 +536,9 @@ err_srcs:
thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ if (ret)
+ dmaengine_terminate_all(chan);
+
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
}
/* terminate all transfers on specified channels */
- dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(dtc->chan);
kfree(dtc);
}
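Both dmatest hunks above replace raw chan->device->device_control() calls with the dmaengine_terminate_all() wrapper. In include/linux/dmaengine.h of this era the wrapper is roughly the following (quoted from memory, not part of this diff):

static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	return chan->device->device_control(chan, cmd, arg);
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}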
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..51c3ea2ed41a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
/*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
@@ -9,11 +8,13 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
return slave ? slave->src_master : 1;
}
+#define SRC_MASTER 0
+#define DST_MASTER 1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_slave *dws = chan->private;
+ unsigned int m;
+
+ if (master == SRC_MASTER)
+ m = dwc_get_sms(dws);
+ else
+ m = dwc_get_dms(dws);
+
+ return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_slave *__slave = (_chan->private); \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- int _dms = dwc_get_dms(__slave); \
- int _sms = dwc_get_sms(__slave); \
- u8 _smsize = __slave ? _sconfig->src_maxburst : \
+ bool _is_slave = is_slave_direction(_dwc->direction); \
+ int _dms = dwc_get_master(_chan, DST_MASTER); \
+ int _sms = dwc_get_master(_chan, SRC_MASTER); \
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
- u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
*/
#define NR_DESCS_PER_CHANNEL 64
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+ return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan)
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
- return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+ return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
return ret;
}
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
- struct dw_desc *child;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- child->txd.phys, sizeof(child->lli),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- desc->txd.phys, sizeof(desc->lli),
- DMA_TO_DEVICE);
-}
-
/*
* Move a descriptor, including any children, to the free list.
* `desc' must not be on any lists.
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
if (desc) {
struct dw_desc *child;
- dwc_sync_desc_for_cpu(dwc, desc);
-
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfghi = dws->cfg_hi;
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
- if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ if (dwc->direction == DMA_MEM_TO_DEV)
cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
- else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ else if (dwc->direction == DMA_DEV_TO_MEM)
cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
}
@@ -223,7 +225,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
channel_readl(dwc, CTL_LO));
}
-
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, ctllo);
channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
channel_set_bit(dw, CH_EN, dwc->mask);
+
+ /* Move pointer to next descriptor */
+ dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- dwc->tx_list = &first->tx_list;
- dwc->tx_node_active = first->tx_list.next;
+ dwc->residue = first->total_len;
+ dwc->tx_node_active = &first->tx_list;
+ /* Submit first block */
dwc_do_single_block(dwc, first);
return;
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
param = txd->callback_param;
}
- dwc_sync_desc_for_cpu(dwc, desc);
-
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
async_tx_ack(&child->txd);
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!dwc->chan.private) {
+ if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
}
}
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback_required && callback)
+ if (callback)
callback(param);
}
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, desc, true);
}
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+ u32 ctlhi = channel_readl(dwc, CTL_HI);
+ u32 ctllo = channel_readl(dwc, CTL_LO);
+
+ return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
dma_addr_t llp;
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ struct list_head *head, *active = dwc->tx_node_active;
+
+ /*
+ * We are inside first active descriptor.
+ * Otherwise something is really wrong.
+ */
+ desc = dwc_first_active(dwc);
+
+ head = &desc->tx_list;
+ if (active != head) {
+ /* Update desc to reflect last sent one */
+ if (active != head->next)
+ desc = to_dw_desc(active->prev);
+
+ dwc->residue -= desc->len;
+
+ child = to_dw_desc(active);
+
+ /* Submit next block */
+ dwc_do_single_block(dwc, child);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* We are done here */
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ }
+
+ dwc->residue = 0;
+
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_complete_all(dw, dwc);
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
if (list_empty(&dwc->active_list)) {
+ dwc->residue = 0;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+ /* initial residue value */
+ dwc->residue = desc->total_len;
+
/* check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- list_for_each_entry(child, &desc->tx_list, desc_node)
+ dwc->residue -= desc->len;
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
if (child->lli.llp == llp) {
/* Currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
+ dwc->residue -= child->len;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+ dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
/*
- * KERN_CRITICAL may seem harsh, but since this only happens
+ * WARN may seem harsh, but since this only happens
* when someone submits a bad physical address in a
* descriptor, we should consider ourselves lucky that the
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- "Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " cookie: %d\n", bad_desc->txd.cookie);
+ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+ " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if (status_xfer & (1 << i)) {
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
- if (dwc->tx_node_active != dwc->tx_list) {
- struct dw_desc *desc =
- list_entry(dwc->tx_node_active,
- struct dw_desc,
- desc_node);
-
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- /* move pointer to next descriptor */
- dwc->tx_node_active =
- dwc->tx_node_active->next;
-
- dwc_do_single_block(dwc, desc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- continue;
- } else {
- /* we are done here */
- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
- }
- }
- spin_unlock_irqrestore(&dwc->lock, flags);
-
+ else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
- }
}
/*
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct d