author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-17 20:17:04 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-17 20:17:04 -0800
commit	52f40e9d657cc126b766304a5dd58ad73b02ff46 (patch)
tree	7cbc22b9a89fe41103e6a61a52b7f9c71ef5ba43 /drivers/net/ethernet/freescale
parent	231753ef780012eb6f3922c3dfc0a7186baa33c2 (diff)
parent	3e3397e7b11ce1b9526975ddfbe8dd569fc1f316 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes and cleanups from David Miller:

 1) Revert bogus nla_ok() change, from Alexey Dobriyan.

 2) Various bpf validator fixes from Daniel Borkmann.

 3) Add some necessary SET_NETDEV_DEV() calls to hsis_femac and hip04
    drivers, from Dongpo Li.

 4) Several ethtool ksettings conversions from Philippe Reynes.

 5) Fix bugs in inet port management wrt. soreuseport, from Tom Herbert.

 6) XDP support for virtio_net, from John Fastabend.

 7) Fix NAT handling within a vrf, from David Ahern.

 8) Endianness fixes in dpaa_eth driver, from Claudiu Manoil.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (63 commits)
  net: mv643xx_eth: fix build failure
  isdn: Constify some function parameters
  mlxsw: spectrum: Mark split ports as such
  cgroup: Fix CGROUP_BPF config
  qed: fix old-style function definition
  net: ipv6: check route protocol when deleting routes
  r6040: move spinlock in r6040_close as SOFTIRQ-unsafe lock order detected
  irda: w83977af_ir: cleanup an indent issue
  net: sfc: use new api ethtool_{get|set}_link_ksettings
  net: davicom: dm9000: use new api ethtool_{get|set}_link_ksettings
  net: cirrus: ep93xx: use new api ethtool_{get|set}_link_ksettings
  net: chelsio: cxgb3: use new api ethtool_{get|set}_link_ksettings
  net: chelsio: cxgb2: use new api ethtool_{get|set}_link_ksettings
  bpf: fix mark_reg_unknown_value for spilled regs on map value marking
  bpf: fix overflow in prog accounting
  bpf: dynamically allocate digest scratch buffer
  gtp: Fix initialization of Flags octet in GTPv1 header
  gtp: gtp_check_src_ms_ipv4() always return success
  net/x25: use designated initializers
  isdn: use designated initializers
  ...
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--	drivers/net/ethernet/freescale/dpaa/Kconfig	2
-rw-r--r--	drivers/net/ethernet/freescale/dpaa/dpaa_eth.c	71
2 files changed, 38 insertions(+), 35 deletions(-)
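
The dpaa_eth changes below all apply one pattern: QMan/FMan descriptor fields are big-endian in DMA-visible memory, so the driver must convert with cpu_to_be16()/cpu_to_be32() on every write and be32_to_cpu() on every read, which makes the code correct on little-endian ARM as well as big-endian PowerPC (where the conversions compile to no-ops, which is why the unconverted code happened to work). A minimal sketch of that pattern, using illustrative stand-in types and bit masks rather than the driver's own definitions:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Stand-in for a DMA-visible descriptor such as qm_mcc_initfq or
	 * qm_fd; the hardware always reads these fields as big-endian.
	 */
	struct hw_desc {
		__be16 we_mask;		/* write-enable mask */
		__be32 status;		/* status word set by hardware */
	};

	#define WE_EXAMPLE_EN	0x0008		/* illustrative enable bit */
	#define ERR_EXAMPLE	0x00000010	/* illustrative error bit */

	static void hw_desc_arm(struct hw_desc *d)
	{
		/* Write path: convert the CPU-order constant before storing. */
		d->we_mask = cpu_to_be16(WE_EXAMPLE_EN);
	}

	static bool hw_desc_errored(const struct hw_desc *d)
	{
		/* Read path: convert back to CPU order before masking. */
		return be32_to_cpu(d->status) & ERR_EXAMPLE;
	}
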
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index f3a3454805f9..a654736237a9 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -1,6 +1,6 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
- depends on FSL_SOC && FSL_DPAA && FSL_FMAN
+ depends on FSL_DPAA && FSL_FMAN
select PHYLIB
select FSL_FMAN_MAC
---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3c48a84dec86..624ba9058dc4 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,7 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
/* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
/* Set different thresholds based on the MAC speed.
@@ -747,7 +747,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
cs_th = DPAA_CS_THRESHOLD_1G;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
@@ -896,18 +896,18 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->init) {
memset(&initfq, 0, sizeof(initfq));
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
/* Note: we may get to keep an empty FQ in cache */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
/* Try to reduce the number of portal interrupts for
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
/* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
@@ -920,8 +920,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->fq_type == FQ_TYPE_TX ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
/* Set a fixed overhead accounting, in an attempt to
* reduce the impact of fixed-size skb shells and the
@@ -932,7 +932,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* insufficient value, but even that is better than
* no overhead accounting at all.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -941,9 +941,9 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
}
if (td_enable) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
}
if (dpaa_fq->fq_type == FQ_TYPE_TX) {
@@ -951,7 +951,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (queue_id >= 0)
confq = priv->conf_fqs[queue_id];
if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
* A2V=1 (contextA A2 field is valid)
* A0V=1 (contextA A0 field is valid)
@@ -959,8 +960,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* ContextA A2: EBD=1 (deallocate buffers inside FMan)
* ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
*/
- initfq.fqd.context_a.hi = 0x1e000000;
- initfq.fqd.context_a.lo = 0x80000000;
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
}
}
@@ -968,13 +969,13 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (priv->use_ingress_cgr &&
(dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
/* Set a fixed overhead accounting, just like for the
* egress CGR.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -984,9 +985,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_HOLDACTIVE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1350,7 +1350,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
parse_result->l4_off = (u8)skb_transport_offset(skb);
/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
/* On P1023 and similar platforms fd->cmd interpretation could
* be disabled by setting CONTEXT_A bit ICMD; currently this bit
@@ -1732,7 +1732,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
/* Fill in the rest of the FD fields */
qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
addr = dma_map_single(dev, skbh,
@@ -1840,7 +1840,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
fd->bpid = FSL_DPAA_BPID_INV;
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
qm_fd_addr_set64(fd, addr);
return 0;
@@ -1867,7 +1867,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
egress_fq = priv->egress_fqs[queue];
if (fd->bpid == FSL_DPAA_BPID_INV)
- fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
/* Trace this Tx fd */
trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
@@ -1960,17 +1960,17 @@ static void dpaa_rx_error(struct net_device *net_dev,
{
if (net_ratelimit())
netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_RX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
percpu_priv->stats.rx_errors++;
- if (fd->status & FM_FD_ERR_DMA)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
percpu_priv->rx_errors.dme++;
- if (fd->status & FM_FD_ERR_PHYSICAL)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
percpu_priv->rx_errors.fpe++;
- if (fd->status & FM_FD_ERR_SIZE)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
percpu_priv->rx_errors.fse++;
- if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
percpu_priv->rx_errors.phe++;
dpaa_fd_release(net_dev, fd);
@@ -1986,7 +1986,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
@@ -2020,10 +2020,11 @@ static void dpaa_tx_conf(struct net_device *net_dev,
{
struct sk_buff *skb;
- if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
}
@@ -2100,6 +2101,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
@@ -2417,12 +2420,12 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
- initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
/* This CGR will be associated with the SWP affined to the current CPU.
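
For reference, the context_a hunk in dpaa_fq_init() above replaces two raw 32-bit stores (0x1e000000 / 0x80000000) with a single 64-bit setter, qm_fqd_context_a_set64(). A hedged sketch of how such a set64-style helper can work — the struct and function name below are hypothetical stand-ins, not the kernel's actual qman.h definitions:

	#include <linux/types.h>
	#include <linux/kernel.h>	/* upper_32_bits()/lower_32_bits() */
	#include <asm/byteorder.h>

	/* Hypothetical stand-in for the two context_a words of a frame-queue
	 * descriptor; the real layout lives in the QMan headers.
	 */
	struct ctx_a_words {
		__be32 hi;
		__be32 lo;
	};

	/* Sketch of a set64-style helper: store both halves of a 64-bit
	 * value in big-endian order with one call, instead of hand-writing
	 * the .hi/.lo pair as the removed lines did.
	 */
	static void ctx_a_set64(struct ctx_a_words *ctx, u64 val)
	{
		ctx->hi = cpu_to_be32(upper_32_bits(val));
		ctx->lo = cpu_to_be32(lower_32_bits(val));
	}

With this helper, ctx_a_set64(&ctx, 0x1e00000080000000ULL) yields hi == 0x1e000000 and lo == 0x80000000 in big-endian memory order, matching the values the old code assigned by hand.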