author     Ioana Radulescu <ruxandra.radulescu@nxp.com>    2018-08-29 04:42:40 -0500
committer  David S. Miller <davem@davemloft.net>           2018-09-01 17:16:59 -0700
commit     34ff68465a17d0c1f022ec9086504cb27dbb2fd7
tree       fbbb4826b8e490fb05fafebe8a6347c2ab163198
parent     7f12c8a3670f15ab8592c287279a4050960a7fc6
dpaa2-eth: Move DPAA2 Ethernet driver from staging to drivers/net
The DPAA2 Ethernet driver supports Freescale/NXP SoCs with DPAA2
(DataPath Acceleration Architecture v2). The driver manages network
objects discovered on the fsl-mc bus.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
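The commit text references network objects discovered on the fsl-mc bus. As a minimal sketch of what that binding looks like (the probe/remove names below are placeholders; the real registration code lives in dpaa2-eth.c, outside the lines quoted in this excerpt), an fsl-mc driver matches DPNI objects roughly like so:

/* Sketch only: declare the DataPath object type handled ("dpni") and
 * register on the fsl-mc bus. Probe/remove names are illustrative.
 */
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table,
};

module_fsl_mc_driver(dpaa2_eth_driver);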
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig                 |    8
-rw-r--r--  drivers/net/ethernet/freescale/Makefile                |    2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile          |   11
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h |  158
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c       | 2654
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h       |  412
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c   |  280
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpkg.h            |  480
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h        |  518
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c            | 1600
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h            |  824
11 files changed, 6947 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a580a3dcbe59..7a30276e1ba6 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -97,4 +97,12 @@ config GIANFAR
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ depends on NETDEVICES && ETHERNET
+ ---help---
+ Ethernet driver for Freescale DPAA2 SoCs, using the
+ Freescale MC bus driver
+
endif # NET_VENDOR_FREESCALE
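The depends lines above gate the new option on the MC bus and DPIO drivers. A configuration fragment satisfying them could look like the following (illustrative; building the driver as a module vs. built-in is a user choice):

CONFIG_FSL_MC_BUS=y
CONFIG_FSL_MC_DPIO=y
CONFIG_FSL_DPAA2_ETH=m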
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 0914a3ea4405..3b4ff08e3841 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -21,3 +21,5 @@ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
obj-$(CONFIG_FSL_FMAN) += fman/
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
new file mode 100644
index 000000000000..9315ecdba612
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
new file mode 100644
index 000000000000..9801528db2a5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa2_eth
+
+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA2_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa2-eth.h"
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
+/* trace_printk format for raw buffer event class */
+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor */
+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fd),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u64, fd_addr)
+ __field(u32, fd_len)
+ __field(u16, fd_offset)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
+ __entry->fd_len = dpaa2_fd_get_len(fd);
+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_FMT,
+ __get_str(name),
+ __entry->fd_addr,
+ __entry->fd_len,
+ __entry->fd_offset)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
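/* Usage note: each DEFINE_EVENT() above generates a trace_<event_name>()
 * helper; dpaa2-eth.c fires these points on its datapath, e.g.
 * trace_dpaa2_rx_fd(priv->net_dev, fd) on Rx and
 * trace_dpaa2_tx_fd(net_dev, &fd) on Tx, with arguments matching the
 * TP_PROTO() of the dpaa2_eth_fd class.
 */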
+
+/* Log data about raw buffers. Useful for tracing DPBP content. */
+TRACE_EVENT(dpaa2_eth_buf_seed,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA2_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
new file mode 100644
index 000000000000..559154a5d9e9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -0,0 +1,2654 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/mc.h>
+
+#include <net/sock.h>
+
+#include "dpaa2-eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
+ * that use these trace events only need to #include the trace header itself
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa2-eth-trace.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* HW checksum validation is disabled, nothing to do here */
+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Read checksum validation bits */
+ if (!((fd_status & DPAA2_FAS_L3CV) &&
+ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_sg_entry *sgt;
+ void *sg_vaddr;
+ int i;
+
+ /* If single buffer frame, just free the data buffer */
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
+ /* We don't support any other format */
+ return;
+
+ /* For S/G frames, we first need to free all SG entries
+ * except the first one, which was taken care of already
+ */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_free_frag(sg_vaddr);
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
+ skb_free_frag(vaddr);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ ch->buf_count--;
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ return skb;
+}
+
+/* Build a non linear (fragmented) skb based on a S/G table */
+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
+{
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sg_vaddr;
+ dma_addr_t sg_addr;
+ u16 sg_offset;
+ u32 sg_length;
+ struct page *page, *head_page;
+ int page_offset;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ struct dpaa2_sg_entry *sge = &sgt[i];
+
+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
+ * but this is the only format we may receive from HW anyway
+ */
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+ if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
+ skb_free_frag(sg_vaddr);
+
+ /* We still need to subtract the buffers used
+ * by this FD from our software counter
+ */
+ while (!dpaa2_sg_is_final(&sgt[i]) &&
+ i < DPAA2_ETH_MAX_SG_ENTRIES)
+ i++;
+ break;
+ }
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Offset in page (which may be compound).
+ * Data in subsequent SG entries is stored from the
+ * beginning of the buffer, so we don't need to add the
+ * sg_offset.
+ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+}
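/* Worked example for the page_offset computation above (illustrative numbers,
 * assuming 4 KiB pages): if sg_vaddr lands 0x100 bytes into the third
 * constituent page of a compound allocation, then
 * (sg_vaddr & (PAGE_SIZE - 1)) = 0x100 and
 * page_address(page) - page_address(head_page) = 2 * PAGE_SIZE = 0x2000,
 * so skb_add_rx_frag() records the fragment at offset 0x2100 from head_page.
 */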
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi,
+ u16 queue_id)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ void *buf_data;
+ u32 status = 0;
+
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+ prefetch(buf_data);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = build_linear_skb(priv, ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ skb = build_frag_skb(priv, ch, buf_data);
+ skb_free_frag(vaddr);
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+ } else {
+ /* We don't support any other format */
+ goto err_frame_format;
+ }
+
+ if (unlikely(!skb))
+ goto err_build_skb;
+
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, queue_id);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
+ napi_gro_receive(napi, skb);
+
+ return;
+
+err_build_skb:
+ free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int consume_frames(struct dpaa2_eth_channel *ch)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_dq *dq;
+ const struct dpaa2_fd *fd;
+ int cleaned = 0;
+ int is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+ /* If we're here, we *must* have placed a
+ * volatile dequeue command, so keep reading through
+ * the store until we get some sort of valid response
+ * token (either a valid frame or an "empty dequeue")
+ */
+ continue;
+ }
+
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+ fq->stats.frames++;
+
+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+ struct dpaa2_faead *faead;
+ u32 ctrl, frc;
+
+ /* Mark the egress frame annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+ /* Set hardware annotation size */
+ ctrl = dpaa2_fd_get_ctrl(fd);
+ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+ /* enable UPD (update prepended data) bit in FAEAD field of
+ * hardware frame annotation area
+ */
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+ dma_addr_t addr;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct dpaa2_sg_entry *sgt;
+ int i, err;
+ int sgt_buf_size;
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+ return -EINVAL;
+
+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+ *
+ * sgt_buf is zeroed out, so the following fields are implicit
+ * in all sgt entries:
+ * - offset is 0
+ * - format is 'dpaa2_sg_single'
+ */
+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
+ dpaa2_sg_set_final(&sgt[i - 1], true);
+
+ /* Store the skb backpointer in the SGT buffer.
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the software annotation area. We'll need
+ * all of them on Tx Conf.
+ */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->skb = skb;
+ swa->scl = scl;
+ swa->num_sg = num_sg;
+ swa->sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, sgt_buf);
+
+ return 0;
+
+dma_map_single_failed:
+ skb_free_frag(sgt_buf);
+sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start, *aligned_start;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
+ /* If there's enough room to align the FD address, do it.
+ * It will help hardware optimize accesses.
+ */
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ enable_tx_tstamp(fd, buffer_start);
+
+ return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ */
+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr;
+ struct sk_buff **skbh, *skb;
+ unsigned char *buffer_start;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+ /* Accessing the skb buffer is safe before dma unmap, because
+ * we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ } else if (fd_format == dpaa2_fd_sg) {
+ swa = (struct dpaa2_eth_swa *)skbh;
+ skb = swa->skb;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
+ kfree(swa->scl);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->sgt_size,
+ DMA_BIDIRECTIONAL);
+ } else {
+ netdev_dbg(priv->net_dev, "Invalid FD format\n");
+ return;
+ }
+
+ /* Get the timestamp value */
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(skbh, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Free SGT buffer allocated on tx */
+ if (fd_format != dpaa2_fd_single)
+ skb_free_frag(skbh);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_fd fd;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct dpaa2_eth_fq *fq;
+ u16 queue_mapping;
+ unsigned int needed_headroom;
+ int err, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ if (skb_headroom(skb) < needed_headroom) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, needed_headroom);
+ if (unlikely(!ns)) {
+ percpu_stats->tx_dropped++;
+ goto err_alloc_headroom;
+ }
+ percpu_extras->tx_reallocs++;
+
+ if (skb->sk)
+ skb_set_owner_w(ns, skb->sk);
+
+ dev_kfree_skb(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Setup the FD fields */
+ memset(&fd, 0, sizeof(fd));
+
+ if (skb_is_nonlinear(skb)) {
+ err = build_sg_fd(priv, skb, &fd);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ } else {
+ err = build_single_fd(priv, skb, &fd);
+ }
+
+ if (unlikely(err)) {
+ percpu_stats->tx_dropped++;
+ goto err_build_fd;
+ }
+
+ /* Tracing point */
+ trace_dpaa2_tx_fd(net_dev, &fd);
+
+ /* TxConf FQ selection relies on queue id from the stack.
+ * In case of a forwarded frame from another DPNI interface, we choose
+ * a queue affined to the same core that processed the Rx frame
+ */
+ queue_mapping = skb_get_queue_mapping(skb);
+ fq = &priv->fq[queue_mapping];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, 0,
+ fq->tx_qdbin, &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ percpu_extras->tx_portal_busy += i;
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+ free_tx_fd(priv, &fd);
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ }
+
+ return NETDEV_TX_OK;
+
+err_build_fd:
+err_alloc_headroom:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi __always_unused,
+ u16 queue_id __always_unused)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_errors;
+
+ /* Tracing point */
+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ percpu_extras->tx_conf_frames++;
+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+
+ /* Check frame errors in the FD field */
+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+ free_tx_fd(priv, fd);
+
+ if (likely(!fd_errors))
+ return;
+
+ if (net_ratelimit())
+ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+ fd_errors);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
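/* Sketch of how the two helpers above are typically wired into the netdev
 * feature path. The function below is schematic (the real callback sits
 * further down in dpaa2-eth.c, outside the lines quoted here), so treat it
 * as an assumption rather than a verbatim copy of the patch.
 */
static int dpaa2_eth_set_features_sketch(struct net_device *net_dev,
					 netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		/* Toggle Rx L3/L4 checksum validation in hardware */
		err = set_rx_csum(priv, !!(features & NETIF_F_RXCSUM));
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		/* Toggle Tx checksum generation in hardware */
		err = set_tx_csum(priv, !!(features & (NETIF_F_IP_CSUM |
						       NETIF_F_IPV6_CSUM)));
		if (err)
			return err;
	}

	return 0;
}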
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ skb_free_frag(vaddr);
+ }
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *buf;
+ dma_addr_t addr;
+ int i, err;
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Allocate buffer visible to WRIOP + skb shared info +
+ * alignment padding
+ */
+ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
+ if (unlikely(!buf))
+ goto err_alloc;
+
+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
+
+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ buf, dpaa2_eth_buf_raw_size(priv),
+ addr, DPAA2_ETH_RX_BUF_SIZE,
+ bpid);
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ buf_array, i)) == -EBUSY)
+ cpu_relax();
+
+ /* If release command failed, clean up and bail out;
+ * not much else we can do about it
+ */
+ if (err) {
+ free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ skb_free_frag(buf);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ int i, j;
+ int new_count;
+
+ /* This is the lazy seeding of Rx buffer pools.
+ * add_bufs() is also used on the Rx hotpath and calls
+ * napi_alloc_frag(). The trouble with that is that it in turn ends up
+ * calling this_cpu_ptr(), which mandates execution in atomic context.
+ * Rather than splitting up the code, do a one-off preempt disable.
+ */
+ preempt_disable();
+ for (j = 0; j < priv->num_channels; j++) {
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = add_bufs(priv, priv->channel[j], bpid);
+ priv->channel[j]->buf_count += new_count;
+
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+ preempt_enable();
+ return -ENOMEM;
+ }
+ }
+ }
+ preempt_enable();