author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-13 15:42:44 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-13 15:42:44 -0700
commit    3f06962273e73c5f7a651726b191d531cadef788 (patch)
tree      370219bd166ac6dda105901dd316a54c2f9ac3d0 /drivers/dma
parent    2260840592fbed5be98ca03c97eb8172941f27ac (diff)
parent    46ce10df799fb0647a9c0e3f793e66463a8d6773 (diff)
Merge tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux
Pull MTD updates from Miquel Raynal:
 "This contains the following changes for MTD:

  MTD core changes:
   - New Hyperbus framework
   - New _is_locked (concat) implementation
   - Various cleanups

  NAND core changes:
   - use longest matching pattern in ->exec_op() default parser
   - export NAND operation tracer
   - add flag to indicate panic_write in MTD
   - use kzalloc() instead of kmalloc() and memset()

  Raw NAND controller drivers changes:
   - brcmnand:
      - fix BCH ECC layout for large page NAND parts
      - fallback to detected ecc-strength, ecc-step-size
      - when oops in progress use pio and interrupt polling
      - refactor code to introduce helper functions
      - add support for v7.3 controller
   - FSMC:
      - use nand_op_trace for operation tracing
   - GPMI:
      - move all driver code into single file
      - various cleanups (including dmaengine changes)
      - use runtime PM to manage clocks
      - implement exec_op
   - MTK:
      - correct low level time calculation of r/w cycle
      - improve data sampling timing for read cycle
      - add validity check for CE# pin setting
      - fix wrongly assigned OOB buffer pointer issue
      - re-license MTK NAND driver as Dual MIT/GPL
   - STM32:
      - manage the get_irq error case
      - increase DMA completion timeouts

  Raw NAND chips drivers changes:
   - Macronix: add read-retry support

  Onenand driver changes:
   - add support for 8Gb datasize chips
   - avoid fall-through warnings

  SPI-NAND changes:
   - define macros for page-read ops with three-byte addresses
   - add support for two-byte device IDs and then for GigaDevice GD5F1GQ4UFxxG
   - add initial support for Paragon PN26G0xA
   - handle the case where the last page read has bitflips

  SPI-NOR core changes:
   - add support for the mt25ql02g and w25q16jv flashes
   - print an error in case the JEDEC read id fails
   - is25lp256: add post BFPT fix to correct the addr_width

  SPI NOR controller drivers changes:
   - intel-spi: Add support for Intel Elkhart Lake SPI serial flash
   - stm32: remove the driver as it was replaced by spi-stm32-qspi.c
   - cadence-quadspi: add reset control"

* tag 'mtd/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (60 commits)
  mtd: concat: implement _is_locked mtd operation
  mtd: concat: refactor concat_lock/concat_unlock
  mtd: abi: do not use C++ style comments in uapi header
  mtd: afs: remove unneeded NULL check
  mtd: rawnand: stm32_fmc2: increase DMA completion timeouts
  mtd: rawnand: Use kzalloc() instead of kmalloc() and memset()
  mtd: hyperbus: Add driver for TI's HyperBus memory controller
  mtd: spinand: read returns badly if the last page has bitflips
  mtd: spinand: Add initial support for Paragon PN26G0xA
  mtd: rawnand: mtk: Re-license MTK NAND driver as Dual MIT/GPL
  mtd: rawnand: gpmi: remove double assignment to block_size
  dt-bindings: mtd: brcmnand: Add brcmnand, brcmnand-v7.3 support
  mtd: rawnand: brcmnand: Add support for v7.3 controller
  mtd: rawnand: brcmnand: Refactored code to introduce helper functions
  mtd: rawnand: brcmnand: When oops in progress use pio and interrupt polling
  mtd: Add flag to indicate panic_write
  mtd: rawnand: Add Macronix NAND read retry support
  mtd: onenand: Avoid fall-through warnings
  mtd: spinand: Add support for GigaDevice GD5F1GQ4UFxxG
  mtd: spinand: Add support for two-byte device IDs
  ...
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/mxs-dma.c  25
1 file changed, 14 insertions, 11 deletions
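
The drivers/dma piece of this pull is the mxs-dma rework needed by the GPMI NAND changes: the driver stops overloading the generic DMA_PREP_INTERRUPT/DMA_CTRL_ACK flags and instead takes custom flags shared with clients such as the GPMI NAND driver through the new <linux/dma/mxs-dma.h> header. The sketch below shows roughly what that header is expected to declare for callers of mxs_dma_prep_slave_sg(); only the two flag names appear in this diff, so the bit positions and the BIT() encoding are assumptions, not part of the commit.

/*
 * Rough sketch of include/linux/dma/mxs-dma.h, inferred from the flag
 * checks added in mxs_dma_prep_slave_sg() below; bit values are guesses.
 */
#ifndef _MXS_DMA_H_
#define _MXS_DMA_H_

#include <linux/bits.h>
#include <linux/dmaengine.h>

/*
 * Assumed to live in the high bits of the prep flags so they cannot clash
 * with the generic enum dma_ctrl_flags values (DMA_PREP_INTERRUPT, etc.).
 */
#define MXS_DMA_CTRL_WAIT4END	BIT(31)	/* set CCW_WAIT4END on the descriptor */
#define MXS_DMA_CTRL_WAIT4RDY	BIT(30)	/* set CCW_WAIT4RDY on a PIO descriptor */

#endif /* _MXS_DMA_H_ */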
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 22cc7f68ef6e..20a9cb7cb6d3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
#include <asm/irq.h>
@@ -77,6 +78,7 @@
#define BM_CCW_COMMAND (3 << 0)
#define CCW_CHAIN (1 << 2)
#define CCW_IRQ (1 << 3)
+#define CCW_WAIT4RDY (1 << 5)
#define CCW_DEC_SEM (1 << 6)
#define CCW_WAIT4END (1 << 7)
#define CCW_HALT_ON_TERM (1 << 8)
@@ -477,16 +479,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
* ......
* ->device_prep_slave_sg(0);
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
* ......
* [3] If there are more than two DMA commands in the DMA chain, the code
* should be:
* ......
* ->device_prep_slave_sg(0); // First
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ * ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
* ......
*/
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -500,13 +502,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
u32 i, j;
u32 *pio;
- bool append = flags & DMA_PREP_INTERRUPT;
- int idx = append ? mxs_chan->desc_count : 0;
+ int idx = 0;
- if (mxs_chan->status == DMA_IN_PROGRESS && !append)
- return NULL;
+ if (mxs_chan->status == DMA_IN_PROGRESS)
+ idx = mxs_chan->desc_count;
- if (sg_len + (append ? idx : 0) > NUM_CCW) {
+ if (sg_len + idx > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
"maximum number of sg exceeded: %d > %d\n",
sg_len, NUM_CCW);
@@ -520,7 +521,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
* If the sg is prepared with append flag set, the sg
* will be appended to the last prepared sg.
*/
- if (append) {
+ if (idx) {
BUG_ON(idx < 1);
ccw = &mxs_chan->ccw[idx - 1];
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +542,14 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- if (flags & DMA_CTRL_ACK)
+ if (flags & MXS_DMA_CTRL_WAIT4END)
ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+ if (flags & MXS_DMA_CTRL_WAIT4RDY)
+ ccw->bits |= CCW_WAIT4RDY;
} else {
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > MAX_XFER_BYTES) {
@@ -573,7 +576,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- if (flags & DMA_CTRL_ACK)
+ if (flags & MXS_DMA_CTRL_WAIT4END)
ccw->bits |= CCW_WAIT4END;
}
}
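
For completeness, a client now has to ask for the wait behaviour explicitly instead of piggybacking on DMA_CTRL_ACK, and appending to a running chain no longer needs DMA_PREP_INTERRUPT because the driver appends whenever the channel status is DMA_IN_PROGRESS, as the idx handling above shows. The snippet below is a hypothetical, minimal caller written for illustration; example_send_command() and its error handling are invented, while the dmaengine helpers and the MXS_DMA_CTRL_WAIT4END flag are the ones this diff is built around.

/*
 * Hypothetical GPMI-style caller, for illustration only: the function name
 * is made up; the flag and the dmaengine calls are not.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/dma/mxs-dma.h>

static int example_send_command(struct dma_chan *chan,
				struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/*
	 * Before this change: DMA_PREP_INTERRUPT | DMA_CTRL_ACK.
	 * Now the "wait for end of command" bit is requested explicitly.
	 */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}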