From 0ebcf1a274c5467c8ed55d0e01db4b414fe4518d Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Tue, 18 Feb 2020 16:31:26 +0200
Subject: dmaengine: ti: k3-udma: Implement support for atype (for virtualization)

The DT for virtualized hosts has dma-cells == 2, where the second
parameter is the ATYPE for the channel.

In case of dma-cells == 1 the ATYPE is configured as 0 (reset value).

The ATYPE values defined for j721e are:
0: pointers are physical addresses (no translation)
1: pointers are intermediate addresses (PVU)
2: pointers are virtual addresses (SMMU)

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200218143126.11361-3-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma-glue.c | 18 ++++++++++++++--
 drivers/dma/ti/k3-udma.c      | 50 ++++++++++++++++++++++++++++++++++++-------
 2 files changed, 58 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c1511298ece2..dbccdc7c0ed5 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -32,6 +32,7 @@ struct k3_udma_glue_common {
 	bool epib;
 	u32 psdata_size;
 	u32 swdata_size;
+	u32 atype;
 };
 
 struct k3_udma_glue_tx_channel {
@@ -121,6 +122,15 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 		return -ENOENT;
 
 	thread_id = dma_spec.args[0];
+	if (dma_spec.args_count == 2) {
+		if (dma_spec.args[1] > 2) {
+			dev_err(common->dev, "Invalid channel atype: %u\n",
+				dma_spec.args[1]);
+			ret = -EINVAL;
+			goto out_put_spec;
+		}
+		common->atype = dma_spec.args[1];
+	}
 
 	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
 		ret = -EINVAL;
@@ -202,7 +212,8 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
-			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
 	req.nav_id = tisci_rm->tisci_dev_id;
 	req.index = tx_chn->udma_tchan_id;
 	if (tx_chn->tx_pause_on_err)
@@ -216,6 +227,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 		req.tx_supr_tdpkt = 1;
 	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
 	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+	req.tx_atype = tx_chn->common.atype;
 
 	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
 }
@@ -502,7 +514,8 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
-			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
 
 	req.nav_id = tisci_rm->tisci_dev_id;
 	req.index = rx_chn->udma_rchan_id;
@@ -519,6 +532,7 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 		req.flowid_cnt = rx_chn->flow_num;
 	}
 	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+	req.rx_atype = rx_chn->common.atype;
 
 	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
 	if (ret)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index ea79c2df28e0..205141494fc1 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -128,6 +128,7 @@ struct udma_dev {
 
 	struct udma_chan *channels;
 	u32 psil_base;
+	u32 atype;
 };
 
 struct udma_hwdesc {
@@ -181,6 +182,7 @@ struct udma_chan_config {
 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
 	bool notdpkt; /* Suppress sending TDC packet */
 	int remote_thread_id;
+	u32 atype;
 	u32 src_thread;
 	u32 dst_thread;
 	enum psil_endpoint_type ep_type;
@@ -1507,7 +1509,8 @@ err_rflow:
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
-	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
 
 #define TISCI_RCHAN_VALID_PARAMS (				\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
@@ -1517,7 +1520,8 @@ err_rflow:
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
-	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
 
 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 {
@@ -1539,6 +1543,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
 	req_tx.txcq_qnum = tc_ring;
+	req_tx.tx_atype = ud->atype;
 
 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
 	if (ret) {
@@ -1552,6 +1557,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
 	req_rx.rxcq_qnum = tc_ring;
 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+	req_rx.rx_atype = ud->atype;
 
 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
 	if (ret)
@@ -1587,6 +1593,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
 	req_tx.tx_fetch_size = fetch_size >> 2;
 	req_tx.txcq_qnum = tc_ring;
+	req_tx.tx_atype = uc->config.atype;
 
 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
 	if (ret)
@@ -1623,6 +1630,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
 	req_rx.rx_fetch_size = fetch_size >> 2;
 	req_rx.rxcq_qnum = rx_ring;
 	req_rx.rx_chan_type = mode;
+	req_rx.rx_atype = uc->config.atype;
 
 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
 	if (ret) {
@@ -2930,13 +2938,18 @@ static void udma_free_chan_resources(struct dma_chan *chan)
 
 static struct platform_driver udma_driver;
 
+struct udma_filter_param {
+	int remote_thread_id;
+	u32 atype;
+};
+
 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 {
 	struct udma_chan_config *ucc;
 	struct psil_endpoint_config *ep_config;
+	struct udma_filter_param *filter_param;
 	struct udma_chan *uc;
 	struct udma_dev *ud;
-	u32 *args;
 
 	if (chan->device->dev->driver != &udma_driver.driver)
 		return false;
@@ -2944,9 +2957,16 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 	uc = to_udma_chan(chan);
 	ucc = &uc->config;
 	ud = uc->ud;
-	args = param;
+	filter_param = param;
+
+	if (filter_param->atype > 2) {
+		dev_err(ud->dev, "Invalid channel atype: %u\n",
+			filter_param->atype);
+		return false;
+	}
 
-	ucc->remote_thread_id = args[0];
+	ucc->remote_thread_id = filter_param->remote_thread_id;
+	ucc->atype = filter_param->atype;
 
 	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
 		ucc->dir = DMA_MEM_TO_DEV;
@@ -2959,6 +2979,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 			ucc->remote_thread_id);
 		ucc->dir = DMA_MEM_TO_MEM;
 		ucc->remote_thread_id = -1;
+		ucc->atype = 0;
 		return false;
 	}
 
@@ -2997,13 +3018,20 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
 {
 	struct udma_dev *ud = ofdma->of_dma_data;
 	dma_cap_mask_t mask = ud->ddev.cap_mask;
+	struct udma_filter_param filter_param;
 	struct dma_chan *chan;
 
-	if (dma_spec->args_count != 1)
+	if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
 		return NULL;
 
-	chan = __dma_request_channel(&mask, udma_dma_filter_fn,
-				     &dma_spec->args[0], ofdma->of_node);
+	filter_param.remote_thread_id = dma_spec->args[0];
+	if (dma_spec->args_count == 2)
+		filter_param.atype = dma_spec->args[1];
+	else
+		filter_param.atype = 0;
+
+	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+				     ofdma->of_node);
 	if (!chan) {
 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
 		return ERR_PTR(-EINVAL);
@@ -3294,6 +3322,12 @@ static int udma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+	if (!ret && ud->atype > 2) {
+		dev_err(dev, "Invalid atype: %u\n", ud->atype);
+		return -EINVAL;
+	}
+
 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
 
-- 
cgit v1.2.3
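
For illustration, a hedged device-tree sketch of the binding described above. It is
not part of this patch; the node names, unit address, PSI-L thread IDs and the
compatible string are assumptions made up for the example. Only the meaning of the
cells (first cell: PSI-L thread ID, second cell: ATYPE 0/1/2) and the optional
"ti,udma-atype" property that udma_probe() reads for MEM_TO_MEM channels follow
from the patch:

	/* NAVSS parent node (details omitted); udma_probe() reads the optional
	 * atype used for MEM_TO_MEM (memcpy) channels from here. */
	main_navss {
		ti,udma-atype = <2>;	/* pointers are virtual addresses (SMMU) */

		main_udmap: dma-controller@31150000 {
			compatible = "ti,j721e-navss-main-udmap";
			#dma-cells = <2>;	/* thread ID + ATYPE */
			/* rings, PSI-L proxy, TISCI resources, ... */
		};
	};

	/* Consumer on a virtualized host: the second cell selects ATYPE 1,
	 * i.e. intermediate addresses translated by the PVU. */
	some-peripheral {
		dmas = <&main_udmap 0xc400 1>, <&main_udmap 0x4400 1>;
		dma-names = "tx", "rx";
	};

On a non-virtualized host the controller keeps #dma-cells = <1> and consumers omit
the second cell; the driver then falls back to ATYPE 0 (physical addresses, no
translation).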