author     Ioana Radulescu <ruxandra.radulescu@nxp.com>     2017-04-28 04:50:29 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-04-28 14:25:17 +0200
commit     6e2387e8f19ed5f7ea47dec74675f256188f0b1f (patch)
tree       684a87f1479455a8a0292fd5c7106ee55631a9bb /drivers/staging
parent     0352d1d852017953a5bdc5d735e3b9ff24fe63bd (diff)
staging: fsl-dpaa2/eth: Add Freescale DPAA2 Ethernet driver
Introduce the DPAA2 Ethernet driver, which manages Datapath Network Interface (DPNI) objects discovered on the MC bus. In addition to DPNIs, the Ethernet driver uses several other MC objects to build a network interface abstraction: buffer pools (DPBPs), I/O Portals (DPIOs) and concentrators (DPCONs). A more detailed description of the driver can be found in the associated README file.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
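For context, the driver binds to DPNI objects through the fsl-mc bus driver interface exposed by drivers/staging/fsl-mc. A minimal sketch of that registration pattern is shown below; the probe/remove handler names and the comments are illustrative, not an exact excerpt of the code added by this patch.

    /* Sketch only: bind a network driver to "dpni" objects on the MC bus,
     * assuming the fsl-mc bus API from drivers/staging/fsl-mc.
     */
    static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
            {
                    .vendor = FSL_MC_VENDOR_FREESCALE,
                    .obj_type = "dpni",     /* bind only to DPNI objects */
            },
            { .vendor = 0x0 }               /* sentinel */
    };

    static struct fsl_mc_driver dpaa2_eth_driver = {
            .driver = {
                    .name = KBUILD_MODNAME,
                    .owner = THIS_MODULE,
            },
            .probe = dpaa2_eth_probe,       /* illustrative: set up netdev, channels, FQs */
            .remove = dpaa2_eth_remove,     /* illustrative: tear down in reverse order */
            .match_id_table = dpaa2_eth_match_id_table,
    };

    module_fsl_mc_driver(dpaa2_eth_driver);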
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/Makefile        2
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c  2455
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h   304
3 files changed, 2760 insertions, 1 deletions
diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
index 83b62644a44b..4897d39a1c21 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
-fsl-dpaa2-eth-objs := dpni.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpni.o
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
new file mode 100644
index 000000000000..abd700e57aeb
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -0,0 +1,2455 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+
+#include "../../fsl-mc/include/mc.h"
+#include "../../fsl-mc/include/mc-sys.h"
+#include "dpaa2-eth.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* HW checksum validation is disabled, nothing to do here */
+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Read checksum validation bits */
+ if (!((fd_status & DPAA2_FAS_L3CV) &&
+ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_sg_entry *sgt;
+ void *sg_vaddr;
+ int i;
+
+ /* If single buffer frame, just free the data buffer */
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
+ /* We don't support any other format */
+ return;
+
+ /* For S/G frames, we first need to free all SG entries */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ sg_vaddr = phys_to_virt(addr);
+ skb_free_frag(sg_vaddr);
+
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
+ skb_free_frag(vaddr);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ch->buf_count--;
+
+ return skb;
+}
+
+/* Build a non-linear (fragmented) skb based on an S/G table */
+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
+{
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sg_vaddr;
+ dma_addr_t sg_addr;
+ u16 sg_offset;
+ u32 sg_length;
+ struct page *page, *head_page;
+ int page_offset;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ struct dpaa2_sg_entry *sge = &sgt[i];
+
+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
+ * but this is the only format we may receive from HW anyway
+ */
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ sg_vaddr = phys_to_virt(sg_addr);
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb))
+ return NULL;
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Offset in page (which may be compound).
+ * Data in subsequent SG entries is stored from the
+ * beginning of the buffer, so we don't need to add the
+ * sg_offset.
+ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(addr);
+
+ prefetch(vaddr + priv->buf_layout.private_data_size);
+ prefetch(vaddr + dpaa2_fd_get_offset(fd));
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = build_linear_skb(priv, ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ struct dpaa2_sg_entry *sgt =
+ vaddr + dpaa2_fd_get_offset(fd);
+ skb = build_frag_skb(priv, ch, sgt);
+ skb_free_frag(vaddr);
+ } else {
+ /* We don't support any other format */
+ goto err_frame_format;
+ }
+
+ if (unlikely(!skb))
+ goto err_build_skb;
+
+ prefetch(skb->data);
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ fas = (struct dpaa2_fas *)
+ (vaddr + priv->buf_layout.private_data_size);
+ status = le32_to_cpu(fas->status);
+ validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
+ if (priv->net_dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+
+ return;
+
+err_build_skb:
+ free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int consume_frames(struct dpaa2_eth_channel *ch)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_dq *dq;
+ const struct dpaa2_fd *fd;
+ int cleaned = 0;
+ int is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+ /* If we're here, we *must* have placed a
+ * volatile dequeue command, so keep reading through
+ * the store until we get some sort of valid response
+ * token (either a valid frame or an "empty dequeue")
+ */
+ continue;
+ }
+
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
+
+ fq->consume(priv, ch, fd, &ch->napi);
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+ void *hwa;
+ dma_addr_t addr;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct dpaa2_sg_entry *sgt;
+ int i, err;
+ int sgt_buf_size;
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+ return -EINVAL;
+
+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+
+ /* The PTA from the egress side is passed as-is to the confirmation side,
+ * so we need to clear some fields here in order to find consistent values
+ * on Tx confirmation. We are clearing the FAS (Frame Annotation Status)
+ * field in the hardware annotation area.
+ */
+ hwa = sgt_buf + priv->buf_layout.private_data_size;
+ memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+ *
+ * sgt_buf is zeroed out, so the following fields are implicit
+ * in all sgt entries:
+ * - offset is 0
+ * - format is 'dpaa2_sg_single'
+ */
+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
+ dpaa2_sg_set_final(&sgt[i - 1], true);
+
+ /* Store the skb backpointer in the SGT buffer.
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the software annotation area. We'll need
+ * all of them on Tx Conf.
+ */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->skb = skb;
+ swa->scl = scl;
+ swa->num_sg = num_sg;
+ swa->num_dma_bufs = num_dma_bufs;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+ DPAA2_FD_CTRL_PTV1);
+
+ return 0;
+
+dma_map_single_failed:
+ kfree(sgt_buf);
+sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start;
+ void *hwa;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+
+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
+ DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+
+ /* The PTA from the egress side is passed as-is to the confirmation side,
+ * so we need to clear some fields here in order to find consistent values
+ * on Tx confirmation. We are clearing the FAS (Frame Annotation Status)
+ * field in the hardware annotation area.
+ */
+ hwa = buffer_start + priv->buf_layout.private_data_size;
+ memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+ DPAA2_FD_CTRL_PTV1);
+
+ return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ * Optionally, return the frame annotation status word (FAS), which needs
+ * to be checked if we're on the confirmation path.
+ */
+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ u32 *status)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr;
+ struct sk_buff **skbh, *skb;
+ unsigned char *buffer_start;
+ int unmap_size;
+ struct scatterlist *scl;
+ int num_sg, num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_fas *fas;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = phys_to_virt(fd_addr);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+ /* Accessing the skb buffer is safe before dma unmap, because
+ * we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+ } else if (fd_format == dpaa2_fd_sg) {
+ swa = (struct dpaa2_eth_swa *)skbh;
+ skb = swa->skb;
+ scl = swa->scl;
+ num_sg = swa->num_sg;
+ num_dma_bufs = swa->num_dma_bufs;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+ kfree(scl);
+
+ /* Unmap the SGT buffer */
+ unmap_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
+ } else {
+ /* Unsupported format, mark it as errored and give up */
+ if (status)
+ *status = ~0;
+ return;
+ }
+
+ /* Read the status from the Frame Annotation after we unmap the first
+ * buffer but before we free it. The caller function is responsible
+ * for checking the status value.
+ */
+ if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ fas = (struct dpaa2_fas *)
+ ((void *)skbh + priv->buf_layout.private_data_size);
+ *status = le32_to_cpu(fas->status);
+ }
+
+ /* Free SGT buffer kmalloc'ed on tx */
+ if (fd_format != dpaa2_fd_single)
+ kfree(skbh);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_fd fd;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ u16 queue_mapping;
+ int err, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+ if (unlikely(!ns)) {
+ percpu_stats->tx_dropped++;
+ goto err_alloc_headroom;
+ }
+ dev_kfree_skb(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Setup the FD fields */
+ memset(&fd, 0, sizeof(fd));
+
+ if (skb_is_nonlinear(skb))
+ err = build_sg_fd(priv, skb, &fd);
+ else
+ err = build_single_fd(priv, skb, &fd);
+ if (unlikely(err)) {
+ percpu_stats->tx_dropped++;
+ goto err_build_fd;
+ }
+
+ /* TxConf FQ selection is primarily based on cpu affinity; this is a
+ * non-migratable context, so it's safe to call smp_processor_id().
+ */
+ queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
+ fq = &priv->fq[queue_mapping];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+ fq->tx_qdbin, &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+ free_tx_fd(priv, &fd, NULL);
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += skb->len;
+ }
+
+ return NETDEV_TX_OK;
+
+err_build_fd:
+err_alloc_headroom:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi __always_unused)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ u32 status = 0;
+
+ free_tx_fd(priv, fd, &status);
+
+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+ }
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *buf;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Allocate buffer visible to WRIOP + skb shared info +
+ * alignment padding
+ */
+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+ if (unlikely(!buf))
+ goto err_alloc;
+
+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+
+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful.
+ * The buffer release function would only fail if the QBMan portal
+ * was busy, which implies portal contention (i.e. more CPUs than
+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
+ * there is little we can realistically do, short of giving up -
+ * in which case we'd risk depleting the buffer pool and never again
+ * receiving the Rx interrupt which would kick-start the refill logic.
+ * So just keep retrying, at the risk of being moved to ksoftirqd.
+ */
+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+ cpu_relax();
+ return i;
+
+err_map:
+ skb_free_frag(buf);
+err_alloc:
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ int i, j;
+ int new_count;
+
+ /* This is the lazy seeding of Rx buffer pools.
+ * add_bufs() is also used on the Rx hotpath and calls
+ * napi_alloc_frag(). The trouble with that is that it in turn ends up
+ * calling this_cpu_ptr(), which mandates execution in atomic context.
+ * Rather than splitting up the code, do a one-off preempt disable.
+ */
+ preempt_disable();
+ for (j = 0; j < priv->num_channels; j++) {
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = add_bufs(priv, bpid);
+ priv->channel[j]->buf_count += new_count;
+
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+ preempt_enable();
+ return -ENOMEM;
+ }
+ }
+ }
+ preempt_enable();
+
+ return 0;
+}
+
+/**
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *vaddr;
+ int ret, i;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
+ buf_array, count);
+ if (ret < 0) {
+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+ return;
+ }
+ for (i = 0; i < ret; i++) {
+ /* Same logic as on regular Rx path */
+ dma_unmap_single(dev, buf_array[i],
+ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(buf_array[i]);
+ skb_free_frag(vaddr);
+ }
+ } while (ret);
+}
+
+static void drain_pool(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ drain_bufs(priv, 1);
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
+{
+ int new_count;
+
+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+ return 0;
+
+ do {
+ new_count = add_bufs(priv, bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll try later on */
+ break;
+ }
+ ch->buf_count += new_count;
+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int pull_channel(struct dpaa2_eth_channel *ch)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+ cpu_relax();
+ } while (err == -EBUSY);
+
+ return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_eth_channel *ch;
+ int cleaned = 0, store_cleaned;
+ struct dpaa2_eth_priv *priv;
+ int err;
+
+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ priv = ch->priv;
+
+ while (cleaned < budget) {
+ err = pull_channel(ch);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ refill_pool(priv, ch, priv->dpbp_attrs.bpid);
+
+ store_cleaned = consume_frames(ch);
+ cleaned += store_cleaned;
+
+ /* If we have enough budget left for a full store,
+ * try a new pull dequeue, otherwise we're done here
+ */
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_ETH_STORE_SIZE)
+ break;
+ }
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ /* Re-enable data available notifications */
+ do {
+ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
+ }
+
+ return cleaned;
+}
+
+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_enable(&ch->napi);
+ }
+}
+
+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_disable(&ch->napi);
+ }
+}
+
+static int link_state_update(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_link_state state;
+ int err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (unlikely(err)) {
+ netdev_err(priv->net_dev,
+ "dpni_get_link_state() failed\n");
+ return err;
+ }
+
+ /* Check link state; speed / duplex changes are not treated yet */
+ if (priv->link_state.up == state.up)
+ return 0;
+
+ priv->link_state = state;
+ if (state.up) {
+ netif_carrier_on(priv->net_dev);
+ netif_tx_start_all_queues(priv->net_dev);
+ } else {
+ netif_tx_stop_all_queues(priv->net_dev);
+ netif_carrier_off(priv->net_dev);
+ }
+
+ netdev_info(priv->net_dev, "Link Event: state %s\n",
+ state.up ? "up" : "down");
+
+ return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = seed_pool(priv, priv->dpbp_attrs.bpid);
+ if (err) {
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
+ }
+
+ /* We'll only start the txqs when the link is actually ready; make sure
+ * we don't race against the link up notification, which may come
+ * immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+ enable_ch_napi(priv);
+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
+ * return true and cause 'ip link show' to report the LOWER_UP flag,
+ * even though the link notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
+ netdev_err(net_dev, "dpni_enable() failed\n");
+ goto enable_err;
+ }
+
+ /* If the DPMAC object has already processed the link up interrupt,
+ * we have to learn the link state ourselves.
+ */
+ err = link_state_update(priv);
+ if (err < 0) {
+ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+
+ return 0;
+
+link_state_err:
+enable_err:
+ disable_ch_napi(priv);
+ drain_pool(priv);
+ return err;
+}
+
+/* The DPIO store must be empty when we call this,
+ * at the end of every NAPI cycle.
+ */
+static u32 drain_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
+{
+ u32 drained = 0, total = 0;
+
+ do {
+ pull_channel(ch);
+ drained = consume_frames(ch);
+ total += drained;
+ } while (drained);
+
+ return total;
+}
+
+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+ u32 drained = 0;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ drained += drain_channel(priv, ch);
+ }
+
+ return drained;
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int dpni_enabled;
+ int retries = 10;
+ u32 drained;
+
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+ */
+ do {
+ dpni_disable(priv->mc_io, 0, priv->mc_token);
+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+ if (dpni_enabled)
+ /* Allow the hardware some slack */
+ msleep(100);
+ } while (dpni_enabled && --retries);
+ if (!retries) {
+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
+ /* Must go on and disable NAPI nonetheless, so we don't crash at
+ * the next "ifconfig up"
+ */
+ }
+
+ /* Wait for NAPI to complete on every core and disable it.
+ * In particular, this will also prevent NAPI from being rescheduled if
+ * a new CDAN is serviced, effectively discarding the CDAN. We therefore
+ * don't even need to disarm the channels, except perhaps for the case
+ * of a huge coalescing value.
+ */
+ disable_ch_napi(priv);
+
+ /* Manually drain the Rx and TxConf queues */
+ drained = drain_ingress_frames(priv);
+ if (drained)
+ netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+
+ /* Empty the buffer pool */
+ drain_pool(priv);
+
+ return 0;
+}
+
+static int dpaa2_eth_init(struct net_device *net_dev)
+{
+ u64 supported = 0;
+ u64 not_supported = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+
+ /* Capabilities listing */
+ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_NO_MAC_FILTER)
+ not_supported |= IFF_UNICAST_FLT;
+ else
+ supported |= IFF_UNICAST_FLT;
+
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
+ /* Features */
+ net_dev->features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_LLTX;
+ net_dev->hw_features = net_dev->features;
+
+ return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/** Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+void dpaa2_eth_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);