Diffstat (limited to 'drivers/net/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 3208
1 file changed, 2001 insertions(+), 1207 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 3696a4b6547b..119ca871f016 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -149,6 +149,242 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
* General service functions
****************************************************************************/
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void __storm_memset_fill(struct bnx2x *bp,
+ u32 addr, size_t size, u32 val)
+{
+ int i;
+ for (i = 0; i < size/4; i++)
+ REG_WR(bp, addr + (i * 4), val);
+}
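
The helpers below funnel through two primitives, but this excerpt only shows __storm_memset_fill(); the __storm_memset_struct() routine they also call is defined outside the visible hunk. Presumably it mirrors the fill variant, copying a host-side struct into storm internal memory one 32-bit word at a time — a sketch under that assumption:

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	/* storm internal memory is written one u32 at a time */
	for (i = 0; i < size / 4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}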
+
+static inline void storm_memset_ustats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct ustorm_per_client_stats);
+
+ u32 addr = BAR_USTRORM_INTMEM +
+ USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_tstats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct tstorm_per_client_stats);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_xstats_zero(struct bnx2x *bp,
+ u8 port, u16 stat_id)
+{
+ size_t size = sizeof(struct xstorm_per_client_stats);
+
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+ __storm_memset_fill(bp, addr, size, 0);
+}
+
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
+{
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
+}
+
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+ struct tstorm_eth_function_common_config *tcfg,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+static inline void storm_memset_xstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_tstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_ustats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_cstats_flags(struct bnx2x *bp,
+ struct stats_indication_flags *flags,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct stats_indication_flags);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_xstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_tstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ustats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_USTRORM_INTMEM +
+ USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_cstats_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+
+ int index_offset =
+ offsetof(struct hc_status_block_data_e1x, index_data);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ index_offset +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, timeout);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ int index_offset =
+ offsetof(struct hc_status_block_data_e1x, index_data);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ index_offset +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, flags);
+ u16 flags = REG_RD16(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR16(bp, addr, flags);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
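
storm_memset_hc_timeout() overwrites just the timeout byte of one hc_index_data entry, while storm_memset_hc_disable() does a read-modify-write on the 16-bit flags word so the other flag bits survive. An illustrative call sequence (the variables are placeholders, not taken from this patch):

	/* illustrative: re-tune coalescing for one status block index */
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, 1);	/* mask it */
	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);	/* new timeout */
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, 0);	/* re-arm */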
+
/* used only at init
* locking is done by mcp
*/
@@ -538,7 +774,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
void bnx2x_panic_dump(struct bnx2x *bp)
{
int i;
- u16 j, start, end;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+#endif
bp->stats_state = STATS_STATE_DISABLED;
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,44 +788,124 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
- " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
" spq_prod_idx(0x%x)\n",
- bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
- bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+ bp->def_idx, bp->def_att_idx,
+ bp->attn_state, bp->spq_prod_idx);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ bp->def_status_blk->atten_status_block.attn_bits,
+ bp->def_status_blk->atten_status_block.attn_bits_ack,
+ bp->def_status_blk->atten_status_block.status_block_id,
+ bp->def_status_blk->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
+ "pf_id(0x%x) vnic_id(0x%x) "
+ "vf_id(0x%x) vf_valid (0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid);
+
- /* Rx */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
-
+ int loop;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ sb_data_e1x.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ sb_data_e1x.index_data;
+ int data_size;
+ u32 *sb_data_p;
+
+ /* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
- " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_prod(0x%x)"
" rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
- le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+ fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
- " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
+ " fp_hc_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
- le16_to_cpu(fp->fp_u_idx),
- fp->status_blk->u_status_block.status_block_index);
- }
-
- /* Tx */
- for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
+ le16_to_cpu(fp->fp_hc_idx));
+ /* Tx */
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
" tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
" *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
- " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
- fp->status_blk->c_status_block.status_block_index,
- fp->tx_db.data.prod);
+
+ loop = HC_SB_MAX_INDICES_E1X;
+
+ /* host sb data */
+
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+ /* fw sb data */
+ data_size =
+ sizeof(struct hc_status_block_data_e1x);
+ data_size /= sizeof(u32);
+ sb_data_p = (u32 *)&sb_data_e1x;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b);
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) "
+ "igu_sb_id (0x%x) igu_seg_id(0x%x) "
+ "time_to_expire (0x%x) "
+ "timer_value(0x%x)\n", j,
+ hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+ /* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) "
+ "timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
}
+#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
for_each_queue(bp, i) {
@@ -642,7 +963,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
}
}
-
+#endif
bnx2x_fw_dump(bp);
bnx2x_mc_assert(bp);
BNX2X_ERR("end crash dump -----------------\n");
@@ -708,7 +1029,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
mmiowb();
}
-static void bnx2x_int_disable(struct bnx2x *bp)
+void bnx2x_int_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -817,76 +1138,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
- bp->spq_left++;
-
- if (fp->index) {
- switch (command | fp->state) {
- case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
- BNX2X_FP_STATE_OPENING):
- DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
- cid);
- fp->state = BNX2X_FP_STATE_OPEN;
- break;
-
- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
- DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
- cid);
- fp->state = BNX2X_FP_STATE_HALTED;
- break;
-
- default:
- BNX2X_ERR("unexpected MC reply (%d) "
- "fp[%d] state is %x\n",
- command, fp->index, fp->state);
- break;
- }
- mb(); /* force bnx2x_wait_ramrod() to see the change */
- return;
- }
-
- switch (command | bp->state) {
- case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
- DP(NETIF_MSG_IFUP, "got setup ramrod\n");
- bp->state = BNX2X_STATE_OPEN;
+ switch (command | fp->state) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
+ fp->state = BNX2X_FP_STATE_OPEN;
break;
- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
- bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
+ case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+ DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
fp->state = BNX2X_FP_STATE_HALTED;
break;
- case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
- bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
+ case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
+ DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
+ fp->state = BNX2X_FP_STATE_TERMINATED;
break;
-#ifdef BCM_CNIC
- case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
- DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
- bnx2x_cnic_cfc_comp(bp, cid);
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) "
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
-#endif
+ }
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
- DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- bp->set_mac_pending--;
- smp_wmb();
- break;
+ bp->spq_left++;
- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- bp->set_mac_pending--;
- smp_wmb();
- break;
+ /* push the change in fp->state towards memory */
+ smp_wmb();
- default:
- BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
- command, bp->state);
- break;
- }
- mb(); /* force bnx2x_wait_ramrod() to see the change */
+ return;
}
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -917,22 +1197,19 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- mask = 0x2 << fp->sb_id;
+ mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
if (status & mask) {
/* Handle Rx and Tx according to SB id */
prefetch(fp->rx_cons_sb);
- prefetch(&fp->status_blk->u_status_block.
- status_block_index);
prefetch(fp->tx_cons_sb);
- prefetch(&fp->status_blk->c_status_block.
- status_block_index);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
}
}
#ifdef BCM_CNIC
- mask = 0x2 << CNIC_SB_ID(bp);
+ mask = 0x2;
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
@@ -1422,7 +1699,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
bp->vn_weight_sum = 0;
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
int func = 2*vn + port;
- u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+ u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -1454,7 +1731,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
struct rate_shaping_vars_per_vn m_rs_vn;
struct fairness_vars_per_vn m_fair_vn;
- u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+ u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
u16 vn_min_rate, vn_max_rate;
int i;
@@ -1511,7 +1788,83 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
((u32 *)(&m_fair_vn))[i]);
}
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_E1HMF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+static void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn;
+
+ if (BP_NOMCP(bp))
+ return; /* what should be the default value in this case? */
+
+ for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ int /*abs*/func = 2*vn + BP_PORT(bp);
+ bp->mf_config =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+
+ if (cmng_type == CMNG_FNS_MINMAX) {
+ int vn;
+
+ /* clear cmng_enables */
+ bp->cmng.flags.cmng_enables = 0;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* Init rate shaping and fairness contexts */
+ bnx2x_init_port_minmax(bp);
+
+ /* calculate vn_weight_sum and enable fairness if it is non-zero */
+ bnx2x_calc_vn_weight_sum(bp);
+
+ /* calculate and set min-max rate for each vn */
+ for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ bnx2x_init_vn_minmax(bp, vn);
+
+ /* always enable rate shaping and fairness */
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+ if (!bp->vn_weight_sum)
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
+ " fairness will be disabled\n");
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
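
bnx2x_cmng_fns_init() has two callers in this patch: bnx2x_pf_init() passes the mode computed by bnx2x_get_cmng_fns_mode(), while the DCC bandwidth-allocation handler forces min-max mode:

	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));	/* PF init */
	bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);			/* DCC event */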
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func;
+ int vn;
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ if (vn == BP_E1HVN(bp))
+ continue;
+
+ func = ((vn << 1) | port);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
@@ -1669,6 +2022,308 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
+/* must be called under rtnl_lock */
+void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+{
+ u32 mask = (1 << cl_id);
+
+ /* initial setting is BNX2X_ACCEPT_NONE */
+ u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
+ u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ u8 unmatched_unicast = 0;
+
+ if (filters & BNX2X_PROMISCUOUS_MODE) {
+ /* promiscuous - accept all, drop none */
+ drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
+ accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_UNICAST) {
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+ }
+ if (filters & BNX2X_ACCEPT_MULTICAST) {
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+ }
+ if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
+ /* accept all ucast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (filters & BNX2X_ACCEPT_BROADCAST) {
+ /* accept (all) bcast */
+ drop_all_bcast = 0;
+ accp_all_bcast = 1;
+ }
+
+ bp->mac_filters.ucast_drop_all = drop_all_ucast ?
+ bp->mac_filters.ucast_drop_all | mask :
+ bp->mac_filters.ucast_drop_all & ~mask;
+
+ bp->mac_filters.mcast_drop_all = drop_all_mcast ?
+ bp->mac_filters.mcast_drop_all | mask :
+ bp->mac_filters.mcast_drop_all & ~mask;
+
+ bp->mac_filters.bcast_drop_all = drop_all_bcast ?
+ bp->mac_filters.bcast_drop_all | mask :
+ bp->mac_filters.bcast_drop_all & ~mask;
+
+ bp->mac_filters.ucast_accept_all = accp_all_ucast ?
+ bp->mac_filters.ucast_accept_all | mask :
+ bp->mac_filters.ucast_accept_all & ~mask;
+
+ bp->mac_filters.mcast_accept_all = accp_all_mcast ?
+ bp->mac_filters.mcast_accept_all | mask :
+ bp->mac_filters.mcast_accept_all & ~mask;
+
+ bp->mac_filters.bcast_accept_all = accp_all_bcast ?
+ bp->mac_filters.bcast_accept_all | mask :
+ bp->mac_filters.bcast_accept_all & ~mask;
+
+ bp->mac_filters.unmatched_unicast = unmatched_unicast ?
+ bp->mac_filters.unmatched_unicast | mask :
+ bp->mac_filters.unmatched_unicast & ~mask;
+}
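
Every accept/drop category above is a per-client bitmask updated with the same set-or-clear ternary, keyed by the client's bit in mask. A hypothetical helper (not part of the patch) states the pattern once:

/* hypothetical: set or clear one client's bit in a filter bitmask */
static inline u32 bnx2x_filter_update(u32 field, u32 mask, int set)
{
	return set ? (field | mask) : (field & ~mask);
}

/* e.g.	bp->mac_filters.ucast_drop_all =
 *		bnx2x_filter_update(bp->mac_filters.ucast_drop_all,
 *				    mask, drop_all_ucast);
 */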
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (FUNC_CONFIG(p->func_flgs)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ /* tpa */
+ if (p->func_flgs & FUNC_FLG_TPA)
+ tcfg.config_flags |=
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+
+ /* set rss flags */
+ if (p->func_flgs & FUNC_FLG_RSS) {
+ u16 rss_flgs = (p->rss->mode <<
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+
+ if (p->rss->cap & RSS_IPV4_CAP)
+ rss_flgs |= RSS_IPV4_CAP_MASK;
+ if (p->rss->cap & RSS_IPV4_TCP_CAP)
+ rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
+ if (p->rss->cap & RSS_IPV6_CAP)
+ rss_flgs |= RSS_IPV6_CAP_MASK;
+ if (p->rss->cap & RSS_IPV6_TCP_CAP)
+ rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+
+ tcfg.config_flags |= rss_flgs;
+ tcfg.rss_result_mask = p->rss->result_mask;
+
+ }
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* statistics */
+ if (p->func_flgs & FUNC_FLG_STATS) {
+ struct stats_indication_flags stats_flags = {0};
+ stats_flags.collect_eth = 1;
+
+ storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
+
+ storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
+ storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
+ }
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ u16 flags = 0;
+
+ /* calculate queue flags */
+ flags |= QUEUE_FLG_CACHE_ALIGN;
+ flags |= QUEUE_FLG_HC;
+ flags |= IS_E1HMF(bp) ? QUEUE_FLG_OV : 0;
+
+#ifdef BCM_VLAN
+ flags |= QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+#endif
+
+ if (!fp->disable_tpa)
+ flags |= QUEUE_FLG_TPA;
+
+ flags |= QUEUE_FLG_STATS;
+
+ return flags;
+}
+
+static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_init_params *rxq_init)
+{
+ u16 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ /* calculate queue flags */
+ u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+ if (!fp->disable_tpa) {
+ pause->sge_th_hi = 250;
+ pause->sge_th_lo = 150;
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_hi = 350;
+ pause->bd_th_lo = 250;
+ pause->rcq_th_hi = 350;
+ pause->rcq_th_lo = 250;
+ pause->sge_th_hi = 0;
+ pause->sge_th_lo = 0;
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->flags = flags;
+ rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ rxq_init->mtu = bp->dev->mtu;
+ rxq_init->buf_sz = bp->rx_buf_size;
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->cl_id = fp->cl_id;
+ rxq_init->spcl_id = fp->cl_id;
+ rxq_init->stat_id = fp->cl_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+
+ rxq_init->cid = HW_CID(bp, fp->cid);
+
+ rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
+}
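
To make the SGE sizing concrete under assumed constants (4 KiB pages, PAGES_PER_SGE == 1 and thus PAGES_PER_SGE_SHIFT == 0, MAX_SKB_FRAGS >= 8) with a 9000-byte MTU: SGE_PAGE_ALIGN(9000) rounds up to 12288, so 12288 >> SGE_PAGE_SHIFT gives 3 pages, and the round-up to a PAGES_PER_SGE multiple leaves max_sge = 3. tpa_agg_size becomes min(8 * 4096 * 1, 0xffff) = 32768 bytes, and sge_buf_sz is min(4096, 0xffff) = 4096. The actual numbers depend on the platform's PAGE_SIZE and the driver's SGE constants.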
+
+static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
+{
+ u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+ txq_init->flags = flags;
+ txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+ txq_init->dscr_map = fp->tx_desc_mapping;
+ txq_init->stat_id = fp->cl_id;
+ txq_init->cid = HW_CID(bp, fp->cid);
+ txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+ txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
+}
+
+void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct bnx2x_rss_params rss = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ /* pf specific setups */
+ if (!CHIP_IS_E1(bp))
+ storm_memset_ov(bp, bp->e1hov, BP_FUNC(bp));
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+ /**
+ * Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (is_eth_multi(bp))
+ * flags |= FUNC_FLG_RSS;
+ */
+
+ /* function setup */
+ if (flags & FUNC_FLG_RSS) {
+ rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
+ RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
+ rss.mode = bp->multi_mode;
+ rss.result_mask = MULTI_MASK;
+ func_init.rss = &rss;
+ }
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link yet, so the initial link rate is set
+ * to 10 Gbps. When the link comes up, the congestion management
+ * values are re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* no rx until link is up */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* init Event Queue */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -1695,40 +2350,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
-static void bnx2x_update_min_max(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- int vn, i;
-
- /* Init rate shaping and fairness contexts */
- bnx2x_init_port_minmax(bp);
-
- bnx2x_calc_vn_weight_sum(bp);
-
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, 2*vn + port);
-
- if (bp->port.pmf) {
- int func;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
- continue;
-
- func = ((vn << 1) | port);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-
- /* Store it to internal memory */
- for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
- ((u32 *)(&bp->cmng))[i]);
- }
-}
-
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -1755,7 +2376,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
}
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
- bnx2x_update_min_max(bp);
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
}
@@ -1790,7 +2413,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
/* Make sure that BD data is updated before writing the producer */
wmb();
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
mmiowb();
}
@@ -1800,6 +2423,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common)
{
struct eth_spe *spe;
+ u16 type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -1821,22 +2445,42 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->hdr.conn_and_cmd_data =
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+
if (common)
- spe->hdr.type |=
- cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+ /* Common ramrods:
+ * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC,
+ * TRAFFIC_STOP, TRAFFIC_START
+ */
+ type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+ else
+ /* ETH ramrods: SETUP, HALT */
+ type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+ & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
- spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
- spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+ spe->hdr.type = cpu_to_le16(type);
- bp->spq_left--;
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /* stats ramrod has its own slot on the spq */
+ if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+ /* It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * further explicit memory barrier is needed.
+ */
+ bp->spq_left--;
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
+ "type(0x%x) left %x\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+ HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
@@ -1873,32 +2517,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
- struct host_def_status_block *def_sb = bp->def_status_blk;
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
u16 rc = 0;
barrier(); /* status block is written to by the chip */
if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
- rc |= 1;
- }
- if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
- bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
- rc |= 2;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
}
- if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
- bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
- rc |= 4;
- }
- if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
- bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
- rc |= 8;
- }
- if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
- bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
- rc |= 16;
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }