author    Rasesh Mody <rmody@brocade.com>        2011-08-08 16:21:36 +0000
committer David S. Miller <davem@davemloft.net>  2011-08-11 07:30:12 -0700
commit    45979c1e424f6a14495a4988343df176cb745f84
tree      6cca0a555549c5d98534af18ca3f9c71c41c8ea6  /drivers/net/ethernet/brocade/bna/bna_enet.c
parent    af027a34f34a8c0794a72dae8367e268eae89dbb
bna: Introduce ENET as New Driver and FW Interface
Change details:
 - This patch contains the messages, opcodes and structure format for the
   messages and responses exchanged between driver and the FW. In addition
   this patch contains the state machine implementation for Ethport, Enet,
   IOCEth.
 - Ethport object is responsible for receiving link state events, sending
   port enable/disable commands to FW.
 - Enet object is responsible for synchronizing initialization/teardown of
   tx & rx datapath configuration.
 - IOCEth object is responsible for init/un-init of IO Controller in the
   adapter which runs the FW.
 - This patch also contains code for initialization and resource assignment
   for Ethport, Enet, IOCEth, Tx, Rx objects.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
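All three objects introduced here (Ethport, Enet, IOCEth) use the driver's
bfa_fsm convention: the object's current state is a function pointer, setting
a state runs that state's entry action, and sending an event calls the current
state's handler. Below is a minimal standalone sketch of that pattern; it is a
simplified illustration with made-up names, not the driver's actual bfa_cs.h
macros, which may differ in detail.

	/* Standalone sketch of the function-pointer FSM pattern (illustrative). */
	#include <stdio.h>

	struct port;

	enum port_event { PORT_E_START = 1, PORT_E_STOP = 2 };

	typedef void (*port_fsm_t)(struct port *, enum port_event);

	struct port {
		port_fsm_t fsm;		/* current state = event handler */
	};

	static void port_sm_stopped(struct port *port, enum port_event event);
	static void port_sm_started(struct port *port, enum port_event event);

	/* Entering a state runs its entry action, as bfa_fsm_set_state() does. */
	static void
	port_sm_stopped_entry(struct port *port)
	{
		printf("entered stopped\n");
	}

	static void
	port_sm_started_entry(struct port *port)
	{
		printf("entered started\n");
	}

	static void
	port_sm_stopped(struct port *port, enum port_event event)
	{
		if (event == PORT_E_START) {
			port->fsm = port_sm_started;
			port_sm_started_entry(port);
		}
	}

	static void
	port_sm_started(struct port *port, enum port_event event)
	{
		if (event == PORT_E_STOP) {
			port->fsm = port_sm_stopped;
			port_sm_stopped_entry(port);
		}
	}

	int
	main(void)
	{
		struct port port = { .fsm = port_sm_stopped };

		port.fsm(&port, PORT_E_START);	/* like bfa_fsm_send_event() */
		port.fsm(&port, PORT_E_STOP);
		return 0;
	}

In the patch itself, the same shape appears through the bfa_fsm_state_decl(),
bfa_fsm_set_state() and bfa_fsm_send_event() calls.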
Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bna_enet.c')
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c  2129
1 file changed, 2129 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
new file mode 100644
index 000000000000..68a275d66fcf
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -0,0 +1,2129 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+
+static inline int
+ethport_can_be_up(struct bna_ethport *ethport)
+{
+ int ready = 0;
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ else
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ return ready;
+}
+
+#define ethport_is_up ethport_can_be_up
+
+enum bna_ethport_event {
+ ETHPORT_E_START = 1,
+ ETHPORT_E_STOP = 2,
+ ETHPORT_E_FAIL = 3,
+ ETHPORT_E_UP = 4,
+ ETHPORT_E_DOWN = 5,
+ ETHPORT_E_FWRESP_UP_OK = 6,
+ ETHPORT_E_FWRESP_DOWN = 7,
+ ETHPORT_E_FWRESP_UP_FAIL = 8,
+};
+
+enum bna_enet_event {
+ ENET_E_START = 1,
+ ENET_E_STOP = 2,
+ ENET_E_FAIL = 3,
+ ENET_E_PAUSE_CFG = 4,
+ ENET_E_MTU_CFG = 5,
+ ENET_E_FWRESP_PAUSE = 6,
+ ENET_E_CHLD_STOPPED = 7,
+};
+
+enum bna_ioceth_event {
+ IOCETH_E_ENABLE = 1,
+ IOCETH_E_DISABLE = 2,
+ IOCETH_E_IOC_RESET = 3,
+ IOCETH_E_IOC_FAILED = 4,
+ IOCETH_E_IOC_READY = 5,
+ IOCETH_E_ENET_ATTR_RESP = 6,
+ IOCETH_E_ENET_STOPPED = 7,
+ IOCETH_E_IOC_DISABLED = 8,
+};
+
+#define bna_stats_copy(_name, _type) \
+do { \
+ count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
+ stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
+ stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
+ for (i = 0; i < count; i++) \
+ stats_dst[i] = be64_to_cpu(stats_src[i]); \
+} while (0)
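
Since the macro relies on token pasting, it may help to see one invocation
written out; bna_stats_copy(mac, mac) from the response handler below expands
mechanically to:

	count = sizeof(struct bfi_enet_stats_mac) / sizeof(u64);
	stats_src = (u64 *)&bna->stats.hw_stats_kva->mac_stats;
	stats_dst = (u64 *)&bna->stats.hw_stats.mac_stats;
	for (i = 0; i < count; i++)
		stats_dst[i] = be64_to_cpu(stats_src[i]);

i.e. each 64-bit counter is byte-swapped from the firmware's big-endian DMA
area into the host-order copy of the stats.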
+
+/*
+ * FW response handlers
+ */
+
+static void
+bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+}
+
+static void
+bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+}
+
+static void
+bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_enable_req *admin_req =
+ &ethport->bfi_enet_cmd.admin_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (admin_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_diag_lb_req *diag_lb_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (diag_lb_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
+}
+
+static void
+bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
+
+ /**
+ * Store only if not set earlier, since BNAD can override the HW
+ * attributes
+ */
+ if (!ioceth->attr.num_txq)
+ ioceth->attr.num_txq = ntohl(rsp->max_cfg);
+ if (!ioceth->attr.num_rxp)
+ ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
+ ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
+}
+
+static void
+bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
+ u64 *stats_src;
+ u64 *stats_dst;
+ u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
+ u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
+ int count;
+ int i;
+
+ bna_stats_copy(mac, mac);
+ bna_stats_copy(bpc, bpc);
+ bna_stats_copy(rad, rad);
+ bna_stats_copy(rlb, rad);
+ bna_stats_copy(fc_rx, fc_rx);
+ bna_stats_copy(fc_tx, fc_tx);
+
+ stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
+
+ /* Copy Rxf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
+ if (rx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_rxf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ /* Copy Txf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
+ if (tx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_txf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ bna->stats_mod.stats_get_busy = false;
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+}
+
+static void
+bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_UP;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
+}
+
+static void
+bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_HALT_INTR(bna, intr_status))
+ bna_halt_clear(bna);
+
+ bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(bna, intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(bna, intr_status))
+ bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
+}
+
+static void
+bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bna *bna = (struct bna *)arg;
+ struct bna_tx *tx;
+ struct bna_rx *rx;
+
+ switch (msghdr->msg_id) {
+ case BFI_ENET_I2H_RX_CFG_SET_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_start_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RX_CFG_CLR_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_stop_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RIT_CFG_RSP:
+ case BFI_ENET_I2H_RSS_CFG_RSP:
+ case BFI_ENET_I2H_RSS_ENABLE_RSP:
+ case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
+ case BFI_ENET_I2H_RX_DEFAULT_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
+ case BFI_ENET_I2H_RX_VLAN_SET_RSP:
+ case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_SET_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_start_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_CLR_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_stop_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ADMIN_RSP:
+ bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
+ bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_SET_PAUSE_RSP:
+ bna_bfi_pause_set_rsp(&bna->enet, msghdr);
+ break;
+
+ case BFI_ENET_I2H_GET_ATTR_RSP:
+ bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_GET_RSP:
+ bna_bfi_stats_get_rsp(bna, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_CLR_RSP:
+ /* No-op */
+ break;
+
+ case BFI_ENET_I2H_LINK_UP_AEN:
+ bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_LINK_DOWN_AEN:
+ bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ENABLE_AEN:
+ bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_DISABLE_AEN:
+ bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_BW_UPDATE_AEN:
+ bna_bfi_bw_update_aen(&bna->tx_mod);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ETHPORT
+ */
+#define call_ethport_stop_cbfn(_ethport) \
+do { \
+ if ((_ethport)->stop_cbfn) { \
+ void (*cbfn)(struct bna_enet *); \
+ cbfn = (_ethport)->stop_cbfn; \
+ (_ethport)->stop_cbfn = NULL; \
+ cbfn(&(_ethport)->bna->enet); \
+ } \
+} while (0)
+
+#define call_ethport_adminup_cbfn(ethport, status) \
+do { \
+ if ((ethport)->adminup_cbfn) { \
+ void (*cbfn)(struct bnad *, enum bna_cb_status); \
+ cbfn = (ethport)->adminup_cbfn; \
+ (ethport)->adminup_cbfn = NULL; \
+ cbfn((ethport)->bna->bnad, status); \
+ } \
+} while (0)
+
+static void
+bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_up_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_down_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_up_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_up_req->mode = (ethport->bna->enet.type ==
+ BNA_ENET_T_LOOPBACK_INTERNAL) ?
+ BFI_ENET_DIAG_LB_OPMODE_EXT :
+ BFI_ENET_DIAG_LB_OPMODE_CBL;
+ lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_down_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_up(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_up(ethport);
+ else
+ bna_bfi_ethport_lpbk_up(ethport);
+}
+
+static void
+bna_bfi_ethport_down(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_down(ethport);
+ else
+ bna_bfi_ethport_lpbk_down(ethport);
+}
+
+bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+
+static void
+bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
+{
+ call_ethport_stop_cbfn(ethport);
+}
+
+static void
+bna_ethport_sm_stopped(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_START:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_STOP:
+ call_ethport_stop_cbfn(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case ETHPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_down(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
+{
+ /**
+ * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
+ * mbox due to up_resp_wait -> down_resp_wait transition on event
+ * ETHPORT_E_DOWN
+ */
+}
+
+static void
+bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to ethport
+ */
+ /* No-op */
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ /* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
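
As a reading aid, the ethport handlers above collapse to this transition
summary (derived from the code, with ETHPORT_ prefixes dropped):

	/*
	 *  stopped          --E_START-----------------> down
	 *  down             --E_STOP------------------> stopped
	 *  down             --E_UP--------------------> up_resp_wait   (posts up req)
	 *  up_resp_wait     --E_FWRESP_UP_OK----------> up
	 *  up_resp_wait     --E_FWRESP_UP_FAIL--------> down
	 *  up_resp_wait     --E_DOWN------------------> down_resp_wait
	 *  down_resp_wait   --E_UP--------------------> up_resp_wait
	 *  down_resp_wait   --E_FWRESP_UP_FAIL/_DOWN--> down
	 *  up               --E_DOWN------------------> down_resp_wait (posts down req)
	 *  up               --E_STOP------------------> last_resp_wait (posts down req)
	 *  up_resp_wait/down_resp_wait --E_STOP-------> last_resp_wait
	 *  last_resp_wait   --E_FWRESP_UP_FAIL/_DOWN--> stopped
	 *  any non-stopped state --E_FAIL-------------> stopped
	 */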
+
+static void
+bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
+{
+ ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
+ ethport->bna = bna;
+
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn = bnad_cb_ethport_link_status;
+
+ ethport->rx_started_count = 0;
+
+ ethport->stop_cbfn = NULL;
+ ethport->adminup_cbfn = NULL;
+
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+}
+
+static void
+bna_ethport_uninit(struct bna_ethport *ethport)
+{
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ ethport->bna = NULL;
+}
+
+static void
+bna_ethport_start(struct bna_ethport *ethport)
+{
+ bfa_fsm_send_event(ethport, ETHPORT_E_START);
+}
+
+static void
+bna_enet_cb_ethport_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+static void
+bna_ethport_stop(struct bna_ethport *ethport)
+{
+ ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
+ bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
+}
+
+static void
+bna_ethport_fail(struct bna_ethport *ethport)
+{
+ /* Reset the physical port status to enabled */
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport->link_status != BNA_LINK_DOWN) {
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ }
+ bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
+}
+
+/* Should be called only when ethport is disabled */
+void
+bna_ethport_cb_rx_started(struct bna_ethport *ethport)
+{
+ ethport->rx_started_count++;
+
+ if (ethport->rx_started_count == 1) {
+ ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+ }
+}
+
+void
+bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->rx_started_count--;
+
+ if (ethport->rx_started_count == 0) {
+ ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+ }
+}
+
+/**
+ * ENET
+ */
+#define bna_enet_chld_start(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_ethport_start(&(enet)->bna->ethport); \
+ bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_chld_stop(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_ethport_stop(&(enet)->bna->ethport); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
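
bna_enet_chld_stop() synchronizes on the driver's wait-counter primitive: the
counter is raised once per child being stopped, each child's stop callback
lowers it, and the resume callback runs when it reaches zero. The init call
biases the count by one and the final wait drops that bias, so the callback
cannot fire before all children have been queued. A minimal standalone sketch
of that pattern follows; it is illustrative only, and the real bfa_wc helpers
in bfa_cs.h may differ in detail.

	/* Standalone sketch of the wait-counter pattern (illustrative). */
	#include <stdio.h>

	typedef void (*wc_resume_t)(void *cbarg);

	struct wc {
		wc_resume_t resume;	/* called when count drops to zero */
		void *cbarg;
		int count;
	};

	static void wc_init(struct wc *wc, wc_resume_t resume, void *cbarg)
	{
		wc->resume = resume;
		wc->cbarg = cbarg;
		wc->count = 1;		/* bias: wc_wait() provides the final drop */
	}

	static void wc_up(struct wc *wc)
	{
		wc->count++;
	}

	static void wc_down(struct wc *wc)
	{
		if (--wc->count == 0)
			wc->resume(wc->cbarg);
	}

	static void all_stopped(void *arg)
	{
		printf("all children stopped\n");
	}

	int main(void)
	{
		struct wc wc;

		wc_init(&wc, all_stopped, NULL);
		wc_up(&wc);	/* child 1 stop pending */
		wc_up(&wc);	/* child 2 stop pending */
		wc_down(&wc);	/* child 1 done */
		wc_down(&wc);	/* child 2 done */
		wc_down(&wc);	/* wc_wait(): drop the init bias; resume fires */
		return 0;
	}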
+
+#define bna_enet_chld_fail(enet) \
+do { \
+ bna_ethport_fail(&(enet)->bna->ethport); \
+ bna_tx_mod_fail(&(enet)->bna->tx_mod); \
+ bna_rx_mod_fail(&(enet)->bna->rx_mod); \
+} while (0)
+
+#define bna_enet_rx_start(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_rx_stop(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
+
+#define call_enet_stop_cbfn(enet) \
+do { \
+ if ((enet)->stop_cbfn) { \
+ void (*cbfn)(void *); \
+ void *cbarg; \
+ cbfn = (enet)->stop_cbfn; \
+ cbarg = (enet)->stop_cbarg; \
+ (enet)->stop_cbfn = NULL; \
+ (enet)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_enet_pause_cbfn(enet) \
+do { \
+ if ((enet)->pause_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->pause_cbfn; \
+ (enet)->pause_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+#define call_enet_mtu_cbfn(enet) \
+do { \
+ if ((enet)->mtu_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->mtu_cbfn; \
+ (enet)->mtu_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+static void bna_enet_cb_chld_stopped(void *arg);
+static void bna_bfi_pause_set(struct bna_enet *enet);
+
+bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+
+static void
+bna_enet_sm_stopped_entry(struct bna_enet *enet)
+{
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+ call_enet_stop_cbfn(enet);
+}
+
+static void
+bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_START:
+ bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
+ break;
+
+ case ENET_E_STOP:
+ call_enet_stop_cbfn(enet);
+ break;
+
+ case ENET_E_FAIL:
+ /* No-op */
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ call_enet_pause_cbfn(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ call_enet_mtu_cbfn(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ /**
+ * This event is received due to Ethport, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
+{
+ bna_bfi_pause_set(enet);
+}
+
+static void
+bna_enet_sm_pause_init_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ /* No-op */
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ bna_enet_chld_start(enet);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+}
+
+static void
+bna_enet_sm_last_resp_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ case ENET_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_started_entry(struct bna_enet *enet)
+{
+ /**
+ * NOTE: Do not call bna_enet_chld_start() here, since it will be
+ * inadvertently called during cfg_wait->started transition as well
+ */
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+}
+
+static void
+bna_enet_sm_started(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_bfi_pause_set(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_enet_rx_stop(enet);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
+{
+}
+
+static void
+bna_enet_sm_cfg_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ enet->flags |= BNA_ENET_F_MTU_CHANGED;
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bna_enet_rx_start(enet);
+ /* Fall through */
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bna_enet_rx_stop(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+}
+
+static void
+bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
+{
+ bna_enet_chld_stop(enet);
+}
+
+static void
+bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
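
Similarly, the enet handlers above reduce to the following summary (derived
from the code, with ENET_ prefixes dropped):

	/*
	 *  stopped          --E_START----------------> pause_init_wait (posts pause cfg)
	 *  pause_init_wait  --E_FWRESP_PAUSE---------> started  (starts ethport/tx/rx)
	 *  pause_init_wait  --E_STOP-----------------> last_resp_wait
	 *  pause_init_wait  --E_FAIL-----------------> stopped
	 *  last_resp_wait   --E_FWRESP_PAUSE/E_FAIL--> stopped
	 *  started          --E_PAUSE_CFG------------> cfg_wait (posts pause cfg)
	 *  started          --E_MTU_CFG--------------> cfg_wait (stops rx, restarts it)
	 *  cfg_wait         --response, no pending cfg--> started
	 *  cfg_wait         --E_STOP-----------------> cfg_stop_wait
	 *  cfg_stop_wait    --E_FWRESP_PAUSE or
	 *                     E_CHLD_STOPPED---------> chld_stop_wait
	 *  started          --E_STOP-----------------> chld_stop_wait (stops children)
	 *  chld_stop_wait   --E_CHLD_STOPPED---------> stopped
	 *  started/cfg_wait/cfg_stop_wait/
	 *  chld_stop_wait   --E_FAIL-----------------> stopped (fails children)
	 */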
+
+static void
+bna_bfi_pause_set(struct bna_enet *enet)
+{
+ struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
+
+ bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
+ pause_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
+ pause_req->tx_pause = enet->pause_config.tx_pause;
+ pause_req->rx_pause = enet->pau