-rw-r--r--	drivers/dma/ti/k3-udma.c	1264
-rw-r--r--	drivers/dma/ti/k3-udma.h	12
2 files changed, 1185 insertions, 91 deletions
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b89afa602532..f327d436f8ad 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -26,6 +26,7 @@
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>
#include "../virt-dma.h"
@@ -55,14 +56,25 @@ struct udma_static_tr {
struct udma_chan;
+enum k3_dma_type {
+ DMA_TYPE_UDMA = 0,
+ DMA_TYPE_BCDMA,
+};
+
enum udma_mmr {
MMR_GCFG = 0,
+ MMR_BCHANRT,
MMR_RCHANRT,
MMR_TCHANRT,
MMR_LAST,
};
-static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+static const char * const mmr_names[] = {
+ [MMR_GCFG] = "gcfg",
+ [MMR_BCHANRT] = "bchanrt",
+ [MMR_RCHANRT] = "rchanrt",
+ [MMR_TCHANRT] = "tchanrt",
+};
struct udma_tchan {
void __iomem *reg_rt;
@@ -72,6 +84,8 @@ struct udma_tchan {
struct k3_ring *tc_ring; /* Transmit Completion ring */
};
+#define udma_bchan udma_tchan
+
struct udma_rflow {
int id;
struct k3_ring *fd_ring; /* Free Descriptor ring */
@@ -84,11 +98,25 @@ struct udma_rchan {
int id;
};
+struct udma_oes_offsets {
+ /* K3 UDMA Output Event Offset */
+ u32 udma_rchan;
+
+ /* BCDMA Output Event Offsets */
+ u32 bcdma_bchan_data;
+ u32 bcdma_bchan_ring;
+ u32 bcdma_tchan_data;
+ u32 bcdma_tchan_ring;
+ u32 bcdma_rchan_data;
+ u32 bcdma_rchan_ring;
+};
+
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
#define UDMA_FLAG_TDTYPE BIT(2)
struct udma_match_data {
+ enum k3_dma_type type;
u32 psil_base;
bool enable_memcpy_support;
u32 flags;
@@ -96,7 +124,8 @@ struct udma_match_data {
};
struct udma_soc_data {
- u32 rchan_oes_offset;
+ struct udma_oes_offsets oes;
+ u32 bcdma_trigger_event_offset;
};
struct udma_hwdesc {
@@ -139,16 +168,19 @@ struct udma_dev {
struct udma_rx_flush rx_flush;
+ int bchan_cnt;
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
int rflow_cnt;
+ unsigned long *bchan_map;
unsigned long *tchan_map;
unsigned long *rchan_map;
unsigned long *rflow_gp_map;
unsigned long *rflow_gp_map_allocated;
unsigned long *rflow_in_use;
+ struct udma_bchan *bchans;
struct udma_tchan *tchans;
struct udma_rchan *rchans;
struct udma_rflow *rflows;
@@ -156,6 +188,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
u32 atype;
+ u32 asel;
};
struct udma_desc {
@@ -200,6 +233,7 @@ struct udma_chan_config {
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
u32 atype;
+ u32 asel;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -207,6 +241,8 @@ struct udma_chan_config {
bool enable_burst;
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+ u32 tr_trigger_type;
+
enum dma_transfer_direction dir;
};
@@ -214,11 +250,13 @@ struct udma_chan {
struct virt_dma_chan vc;
struct dma_slave_config cfg;
struct udma_dev *ud;
+ struct device *dma_dev;
struct udma_desc *desc;
struct udma_desc *terminated_desc;
struct udma_static_tr static_tr;
char *name;
+ struct udma_bchan *bchan;
struct udma_tchan *tchan;
struct udma_rchan *rchan;
struct udma_rflow *rflow;
@@ -354,6 +392,30 @@ static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
src_thread, dst_thread);
}
+static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
+{
+ struct device *chan_dev = &chan->dev->device;
+
+ if (asel == 0) {
+ /* No special handling for the channel */
+ chan->dev->chan_dma_dev = false;
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ } else if (asel == 14 || asel == 15) {
+ chan->dev->chan_dma_dev = true;
+
+ chan_dev->dma_coherent = true;
+ dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
+ chan_dev->dma_parms = chan_dev->parent->dma_parms;
+ } else {
+ dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ }
+}
+
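A minimal client-side sketch (hypothetical consumer code, not part of this
patch) of what the asel handling above implies: once chan_dma_dev is set,
buffers must be mapped against the channel's device rather than the client's.

	struct device *dma_dev = dmaengine_get_dma_device(chan);
	dma_addr_t addr;

	/* Map against the channel's device, which the function above may
	 * have marked DMA-coherent for asel 14/15. */
	addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;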
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
@@ -440,9 +502,7 @@ static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
d->hwdesc[i].cppi5_desc_vaddr = NULL;
}
} else if (d->hwdesc[0].cppi5_desc_vaddr) {
- struct udma_dev *ud = uc->ud;
-
- dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
d->hwdesc[0].cppi5_desc_vaddr,
d->hwdesc[0].cppi5_desc_paddr);
@@ -671,8 +731,10 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
- val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
- udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
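+ /* the PEER_* counters live on the PSI-L peer; a bchan has no peer */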
+ if (!uc->bchan) {
+ val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
}
if (uc->rchan) {
@@ -1233,6 +1295,42 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
+static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
+{
+ if (id >= 0) {
+ if (test_bit(id, ud->bchan_map)) {
+ dev_err(ud->dev, "bchan%d is in use\n", id);
+ return ERR_PTR(-ENOENT);
+ }
+ } else {
+ id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
+ if (id == ud->bchan_cnt)
+ return ERR_PTR(-ENOENT);
+ }
+
+ set_bit(id, ud->bchan_map);
+ return &ud->bchans[id];
+}
+
+static int bcdma_get_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
+ uc->id, uc->bchan->id);
+ return 0;
+ }
+
+ uc->bchan = __bcdma_reserve_bchan(ud, -1);
+ if (IS_ERR(uc->bchan))
+ return PTR_ERR(uc->bchan);
+
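+ /* a bchan is laid out like a tchan, so tchan-based code paths apply */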
+ uc->tchan = uc->bchan;
+
+ return 0;
+}
+
static int udma_get_tchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1325,6 +1423,19 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
return PTR_ERR_OR_ZERO(uc->rflow);
}
+static void bcdma_put_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+ uc->bchan->id);
+ clear_bit(uc->bchan->id, ud->bchan_map);
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ }
+}
+
static void udma_put_rchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1361,6 +1472,65 @@ static void udma_put_rflow(struct udma_chan *uc)
}
}
+static void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+ if (!uc->bchan)
+ return;
+
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->tc_ring = NULL;
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+
+ bcdma_put_bchan(uc);
+}
+
+static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = bcdma_get_bchan(uc);
+ if (ret)
+ return ret;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
+ &uc->bchan->t_ring,
+ &uc->bchan->tc_ring);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
+ ring_cfg.asel = ud->asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+
+ ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ uc->bchan->tc_ring = NULL;
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+err_ring:
+ bcdma_put_bchan(uc);
+
+ return ret;
+}
+
static void udma_free_tx_resources(struct udma_chan *uc)
{
if (!uc->tchan)
@@ -1378,15 +1548,19 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
struct udma_dev *ud = uc->ud;
- int ret;
+ struct udma_tchan *tchan;
+ int ring_idx, ret;
ret = udma_get_tchan(uc);
if (ret)
return ret;
- ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
- &uc->tchan->t_ring,
- &uc->tchan->tc_ring);
+ tchan = uc->tchan;
+ ring_idx = ud->bchan_cnt + tchan->id;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
+ &tchan->t_ring,
+ &tchan->tc_ring);
if (ret) {
ret = -EBUSY;
goto err_ring;
@@ -1395,10 +1569,18 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
- ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
- ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+ ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
if (ret)
goto err_ringcfg;
@@ -1458,7 +1640,8 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
}
rflow = uc->rflow;
- fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+ uc->rchan->id;
ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
&rflow->fd_ring, &rflow->r_ring);
if (ret) {
@@ -1468,15 +1651,25 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
- if (uc->config.pkt_mode)
- ring_cfg.size = SG_MAX_SEGMENTS;
- else
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
- ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
@@ -1498,7 +1691,18 @@ err_rflow:
return ret;
}
-#define TISCI_TCHAN_VALID_PARAMS ( \
+#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
+
+#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
+
+#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
+
+#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
@@ -1508,7 +1712,7 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
-#define TISCI_RCHAN_VALID_PARAMS ( \
+#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
@@ -1533,7 +1737,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
@@ -1547,7 +1751,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
@@ -1562,6 +1766,27 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct udma_bchan *bchan = uc->bchan;
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
+ req_tx.index = bchan->id;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
+
+ return ret;
+}
+
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1582,7 +1807,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = mode;
@@ -1605,6 +1830,33 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1627,7 +1879,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = fetch_size >> 2;
@@ -1686,6 +1938,26 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
return 0;
}
+static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ int ret = 0;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
@@ -1695,6 +1967,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
u32 irq_udma_idx;
int ret;
+ uc->dma_dev = ud->dev;
+
if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
uc->use_dma_pool = true;
/* in case of MEM_TO_MEM we have maximum of two TRs */
@@ -1790,7 +2064,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -1890,6 +2164,218 @@ err_cleanup:
return ret;
}
+static int bcdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_udma_idx, irq_ring_idx;
+ int ret;
+
+ /* Only TR mode is supported */
+ uc->config.pkt_mode = false;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = bcdma_alloc_bchan_resources(uc);
+ if (ret)
+ return ret;
+
+ irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
+ irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
+
+ ret = bcdma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
+ irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
+
+ ret = bcdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
+ irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
+
+ ret = bcdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Can not happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+
+ uc->use_dma_pool = true;
+ } else if (uc->config.dir != DMA_MEM_TO_MEM) {
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev,
+ "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+ }
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from BCDMA (TR events) only needed for slave channels */
+ if (is_slave_direction(uc->config.dir)) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ if (uc->psil_paired)
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
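A hedged sketch of how a consumer could reach the triggered MEM_TO_MEM path
set up above (hypothetical client code; the channel name and trigger wiring
are board/DT specific):

	/* The trigger type travels in the dma_spec and lands in
	 * uc->config.tr_trigger_type before resources are allocated. */
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);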
+static int bcdma_router_config(struct dma_chan *chan)
+{
+ struct k3_event_route_data *router_data = chan->route_data;
+ struct udma_chan *uc = to_udma_chan(chan);
+ u32 trigger_event;
+
+ if (!uc->bchan)
+ return -EINVAL;
+
+ if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
+ return -EINVAL;
+
+ trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
+ trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
+
+ return router_data->set_event(router_data->priv, trigger_event);
+}
+
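A worked example of the trigger event arithmetic above, using hypothetical
numbers:

	/*
	 * With bcdma_trigger_event_offset = 0xc400, bchan->id = 2 and
	 * tr_trigger_type = 1:
	 *	trigger_event = 0xc400 + (2 * 2) + 1 - 1 = 0xc404
	 * Trigger type 2 on the same bchan would select 0xc405.
	 */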
static int udma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -2034,6 +2520,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
size_t tr_size;
int num_tr = 0;
int tr_idx = 0;
+ u64 asel;
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
@@ -2051,6 +2538,11 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->sglen = sglen;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2069,6 +2561,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ sg_addr |= asel;
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
tr_req[tr_idx].icnt1 = tr0_cnt1;
@@ -2098,6 +2591,205 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
return d;
}
+static struct udma_desc *
+udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen,
+ enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_tr_type15_t *tr_req = NULL;
+ enum dma_slave_buswidth dev_width;
+ u16 tr_cnt0, tr_cnt1;
+ dma_addr_t dev_addr;
+ struct udma_desc *d;
+ unsigned int i;
+ size_t tr_size, sg_len;
+ int num_tr = 0;
+ int tr_idx = 0;
+ u32 burst, trigger_size, port_window;
+ u64 asel;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = uc->cfg.src_addr;
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ port_window = uc->cfg.src_port_window_size;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = uc->cfg.dst_addr;
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ port_window = uc->cfg.dst_port_window_size;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (port_window) {
+ if (port_window != burst) {
+ dev_err(uc->ud->dev,
+ "The burst must be equal to port_window\n");
+ return NULL;
+ }
+
+ tr_cnt0 = dev_width * port_window;
+ tr_cnt1 = 1;
+ } else {
+ tr_cnt0 = dev_width;
+ tr_cnt1 = burst;
+ }
+ trigger_size = tr_cnt0 * tr_cnt1;
+
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ sg_len = sg_dma_len(sgent);
+
+ if (sg_len % trigger_size) {
+ dev_err(uc->ud->dev,
+ "Not aligned SG entry (%zu for %u)\n", sg_len,
+ trigger_size);
+ return NULL;
+ }
+
+ if (sg_len / trigger_size < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type15_t);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
+ asel = 0;
+ } else {
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+ dev_addr |= asel;
+ }
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ sg_len = sg_dma_len(sgent);
+ num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
+ &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ sg_len);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
+ true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
+
+ sg_addr |= asel;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+ tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+
+ tr_idx++;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
+ false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ 0, 0);
+
+ sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+ tr_idx++;
+ }
+
+ d->residue += sg_len;
+ }
+
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
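A worked example of the sizing logic above, using hypothetical slave
configuration values:

	/*
	 * dev_width = 4 bytes, burst = 8, no port window:
	 *	tr_cnt0 = 4, tr_cnt1 = 8, trigger_size = 32 bytes per trigger
	 * A 4096-byte SG entry then needs 4096 / 32 = 128 triggers; since
	 * 128 < SZ_64K a single type 15 TR covers it (icnt2 * icnt3 = 128),
	 * and only counts of 64K and above take the two-TR split.
	 */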
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
enum dma_slave_buswidth dev_width,
u16 elcnt)
@@ -2339,7 +3031,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct udma_desc *d;
u32 burst;
- if (dir != uc->config.dir) {
+ if (dir != uc->config.dir &&
+ (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
dev_err(chan->device->dev,
"%s: chan%d is for %s, not supporting %s\n",
__func__, uc->id,
@@ -2365,9 +3058,12 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
- else
+ else if (is_slave_direction(uc->config.dir))
d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
context);
+ else
+ d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
+ tx_flags, context);
if (!d)
return NULL;
@@ -2421,7 +3117,12 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
- period_addr = buf_addr;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ period_addr = buf_addr;
+ else
+ period_addr = buf_addr |
+ ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -2627,6 +3328,11 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
d->tr_idx = 0;
d->residue = len;
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
+ src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ }
+
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
@@ -2984,6 +3690,7 @@ static void udma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&uc->vc);
tasklet_kill(&uc->vc.task);
+ bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
udma_reset_uchan(uc);
@@ -2995,10 +3702,13 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
static struct platform_driver udma_driver;
+static struct platform_driver bcdma_driver;
struct udma_filter_param {
int remote_thread_id;
u32 atype;
+ u32 asel;
+ u32 tr_trigger_type;
};
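A hypothetical sketch (not the xlate from this patch, which lies beyond this
excerpt) of how these filter fields could be populated from a dma_spec:

static struct dma_chan *bcdma_xlate_sketch(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	/* Assumed cell layout: args[0] = trigger type, args[1] = asel */
	struct udma_filter_param filter_param = {
		.tr_trigger_type = dma_spec->args[0],
		.asel = dma_spec->args[1],
		.remote_thread_id = -1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
}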
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
@@ -3009,7 +3719,8 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
struct udma_chan *uc;
struct udma_dev *ud;
- if (chan->device->dev->driver != &udma_driver.driver)
+ if (chan->device->dev->driver != &udma_driver.driver &&
+ chan->device->dev->driver != &bcdma_driver.driver)
return false;
uc = to_udma_chan(chan);
@@ -3023,13 +3734,25 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
+ if (filter_param->asel > 15) {
+ dev_err(ud->dev, "Invalid channel asel: %u\n",
+ filter_param->asel);
+ return false;
+ }
+
ucc->remote_thread_id = filter_param->remote_thread_id;
ucc->atype = filter_param->atype;
+ ucc->asel = filter_param->asel;
+ ucc->tr_trigger_type = filter_param->tr_trigger_type;
- if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ if (ucc->tr_trigger_type) {
+ ucc->dir = DMA_MEM_TO_MEM;
+ goto triggered_bchan;
+ } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
ucc->dir = DMA_MEM_TO_DEV;
- else
+ } else {
ucc->dir = DMA_DEV_TO_ME