author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-18 17:53:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-18 17:53:46 -0700
commit		ebfc45ee7004088d610cd399d2dee6d95f87199b (patch)
tree		7e3edfaeb7660d20eb187e9d2b1d6c9a9b3ceece
parent		6e66d5dab5d530a368314eb631201a02aabb075d (diff)
parent		b14878ccb7fac0242db82720b784ab62c467c0dc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull more networking fixes from David Miller:

 1) Fix mlx4_en_netpoll implementation, it needs to schedule a NAPI
    context, not synchronize it. From Chris Mason.

 2) Ipv4 flow input interface should never be zero, it should be
    LOOPBACK_IFINDEX instead. From Cong Wang and Julian Anastasov.

 3) Properly configure MAC to PHY connection in mvneta devices, from
    Thomas Petazzoni.

 4) sys_recv should use SYSCALL_DEFINE. From Jan Glauber.

 5) Tunnel driver ioctls do not use the correct namespace, fix from
    Nicolas Dichtel.

 6) Fix memory leak on seccomp filter attach, from Kees Cook.

 7) Fix lockdep warning for nested vlans, from Ding Tianhong.

 8) Crashes can happen in SCTP due to how the auth_enable value is
    managed, fix from Vlad Yasevich.

 9) Wireless fixes from John W Linville and co.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (45 commits)
  net: sctp: cache auth_enable per endpoint
  tg3: update rx_jumbo_pending ring param only when jumbo frames are enabled
  vlan: Fix lockdep warning when vlan dev handle notification
  seccomp: fix memory leak on filter attach
  isdn: icn: buffer overflow in icn_command()
  ip6_tunnel: use the right netns in ioctl handler
  sit: use the right netns in ioctl handler
  ip_tunnel: use the right netns in ioctl handler
  net: use SYSCALL_DEFINEx for sys_recv
  net: mdio-gpio: Add support for separate MDI and MDO gpio pins
  net: mdio-gpio: Add support for active low gpio pins
  net: mdio-gpio: Use devm_ functions where possible
  ipv4, route: pass 0 instead of LOOPBACK_IFINDEX to fib_validate_source()
  ipv4, fib: pass LOOPBACK_IFINDEX instead of 0 to flowi4_iif
  mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll
  net: mvneta: properly configure the MAC <-> PHY connection in all situations
  net: phy: add minimal support for QSGMII PHY
  sfc:On MCDI timeout, issue an FLR (and mark MCDI to fail-fast)
  mwifiex: fix hung task on command timeout
  mwifiex: process event before command response
  ...
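[Editor's note: item 4 refers to converting an open-coded syscall definition to the
SYSCALL_DEFINEx macros from <linux/syscalls.h>, so the wrapper picks up the generic
syscall plumbing (argument typing, tracing hooks) instead of a bare asmlinkage
prototype. A minimal sketch of that pattern, assuming kernel context in net/socket.c;
the actual 4-line hunk is listed in the diffstat below but falls outside this excerpt:

	/* recv(2) declared via SYSCALL_DEFINE4 rather than an open-coded
	 * "asmlinkage long sys_recv(...)"; it remains a thin wrapper that
	 * forwards to recvfrom with a NULL source address.
	 */
	SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size,
			unsigned int, flags)
	{
		return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
	}
]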
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet.txt | 2
-rw-r--r--  drivers/isdn/icn/icn.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 73
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 19
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 23
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 22
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 55
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 13
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 2
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 68
-rw-r--r--  drivers/net/wireless/cw1200/debug.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 261
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 12
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 7
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_core.c | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 21
-rw-r--r--  drivers/net/wireless/ti/wl18xx/event.h | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/event.c | 5
-rw-r--r--  include/linux/mdio-gpio.h | 5
-rw-r--r--  include/linux/phy.h | 3
-rw-r--r--  include/net/flow.h | 10
-rw-r--r--  include/net/net_namespace.h | 9
-rw-r--r--  include/net/sctp/structs.h | 4
-rw-r--r--  kernel/seccomp.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 46
-rw-r--r--  net/core/dev.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_semantics.c | 1
-rw-r--r--  net/ipv4/ip_tunnel.c | 15
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c | 5
-rw-r--r--  net/ipv4/route.c | 3
-rw-r--r--  net/ipv6/ip6_tunnel.c | 8
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/ipv6/sit.c | 17
-rw-r--r--  net/mac80211/chan.c | 11
-rw-r--r--  net/mac80211/main.c | 4
-rw-r--r--  net/mac80211/offchannel.c | 1
-rw-r--r--  net/mac80211/status.c | 1
-rw-r--r--  net/sctp/auth.c | 17
-rw-r--r--  net/sctp/endpointola.c | 3
-rw-r--r--  net/sctp/sm_make_chunk.c | 32
-rw-r--r--  net/sctp/sm_statefuns.c | 6
-rw-r--r--  net/sctp/socket.c | 54
-rw-r--r--  net/sctp/sysctl.c | 36
-rw-r--r--  net/socket.c | 4
60 files changed, 659 insertions, 339 deletions
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 9ecd43d8792c..3fc360523bc9 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -10,7 +10,7 @@ The following properties are common to the Ethernet controllers:
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in ePAPR).
- phy-mode: string, operation mode of the PHY interface; supported values are
- "mii", "gmii", "sgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
+ "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
"rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
standard property;
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 53d487f0c79d..6a7447c304ac 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card)
ulong a;
ulong flags;
int i;
- char cbuf[60];
+ char cbuf[80];
isdn_ctrl cmd;
icn_cdef cdef;
char __user *arg;
@@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card)
break;
if ((c->arg & 255) < ICN_BCH) {
char *p;
- char dial[50];
char dcode[4];
a = c->arg;
@@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
- strcpy(dial, p);
- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, dial, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
+ snprintf(cbuf, sizeof(cbuf),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ dcode, p, c->parm.setup.si1,
+ c->parm.setup.si2, c->parm.setup.eazmsn);
i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b9f7022f4e81..e5d95c5ce1ad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b248bcbdae63..14786c8bf99e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -89,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -711,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PCS_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2749,26 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
}
/* Power up the port */
-static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 val;
+ u32 ctrl;
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- mvneta_gmac_rgmii_set(pp, 1);
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ switch(phy_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ ctrl |= MVNETA_GMAC2_PORT_RGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+ ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
MVNETA_GMAC2_PORT_RESET) != 0)
continue;
+
+ return 0;
}
/* Device initialization routine */
@@ -2879,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't init eth hal\n");
goto err_free_stats;
}
- mvneta_port_power_up(pp, phy_mode);
+
+ err = mvneta_port_power_up(pp, phy_mode);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't power up port\n");
+ goto err_deinit;
+ }
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e95324a97d..c2cd8d31bcad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f085c2df5e69..7e4b1720c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7a733c287744..04d9b6fe3e80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
int size;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21c20ea0dad0..b5ed30a39144 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
+ * For an FLR, we never get an MC reset event, but the MC has reset all
+ * resources assigned to us, so we have to trigger reallocation now.
*/
- if (reset_type == RESET_TYPE_ALL && !rc)
+ if ((reset_type == RESET_TYPE_ALL ||
+ reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
efx_ef10_reset_mc_allocations(efx);
return rc;
}
@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
return 0;
}
+static void efx_ef10_prepare_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->active_queues, 0);
+}
+
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 57b971e5e6b2..63d595fd3cc5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev))
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
return;
efx_start_port(efx);
@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
efx_stop_all(efx);
efx_disable_interrupts(efx);
@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
/* Clear flags for the scopes we covered. We assume the NIC and
* driver are now quiescent so that there is no race here.
*/
- efx->reset_pending &= -(1 << (method + 1));
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 75ef7ef6450b..d1dbb5fb31bb 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,14 +151,16 @@ enum efx_loopback_mode {
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
*/
enum reset_type {
- RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_RECOVER_OR_ALL = 1,
- RESET_TYPE_ALL = 2,
- RESET_TYPE_WORLD = 3,
- RESET_TYPE_RECOVER_OR_DISABLE = 4,
- RESET_TYPE_DISABLE = 5,
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_MC_BIST,
+ RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
@@ -165,7 +168,13 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
- RESET_TYPE_MC_BIST,
+ /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+ * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+ * We encode this by having its enum value be greater than
+ * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
+ * efx_ioctl_reset.
+ */
+ RESET_TYPE_MCDI_TIMEOUT,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ec20b713cc6..fae25a418647 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index a08761360cdf..0537381cd2f6 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
return rc;
}
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete())
+ * If we don't fix this up, on the next call to efx_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests; and the efx->active_queues gets messed up because
+ * we keep incrementing for the newly initialised queues, but it never went to
+ * zero previously. Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+void efx_farch_finish_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ atomic_set(&efx->active_queues, 0);
+}
+
+
/**************************************************************************
*
* Event queue processing
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 7bd4b14bf3b3..5239cf9bdc56 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
-
-static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
-{
- EFX_BUG_ON_PARANOID(!efx->mcdi);
- return &efx->mcdi->iface;
-}
+static void efx_mcdi_abandon(struct efx_nic *efx);
int efx_mcdi_init(struct efx_nic *efx)
{
@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
rc = 0;
}
+ efx_mcdi_abandon(efx);
+
/* Close the race with efx_mcdi_ev_cpl() executing just too late
* and completing a request we've just cancelled, by ensuring
* that the seqno check therein fails.
@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (efx->mc_bist_for_other_fn)
return -ENETDOWN;
+ if (mcdi->mode == MCDI_MODE_FAIL)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
- if (mcdi->mode == MCDI_MODE_POLL)
+ /* If already in polling mode, nothing to do.
+ * If in fail-fast state, don't switch to polled completion.
+ * FLR recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
return;
/* We can switch from event completion to polled completion, because
@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
mcdi = efx_mcdi(efx);
- /* We must be in polling mode so no more requests can be queued */
- BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+ /* We must be in poll or fail mode so no more requests can be queued */
+ BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
del_timer_sync(&mcdi->async_timer);
@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
-
- if (mcdi->mode == MCDI_MODE_EVENTS)
+ /* If already in event completion mode, nothing to do.
+ * If in fail-fast state, don't switch to event completion. FLR
+ * recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
return;