Diffstat (limited to 'drivers/net/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c  2637
1 file changed, 38 insertions(+), 2599 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 0beaefb7a160..0c00e50787f9 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -56,6 +56,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
+#include "bnx2x_cmn.h"
#define DRV_MODULE_VERSION "1.52.53-1"
#define DRV_MODULE_RELDATE "2010/18/04"
@@ -652,7 +653,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
BNX2X_ERR("end crash dump -----------------\n");
}
-static void bnx2x_int_enable(struct bnx2x *bp)
+void bnx2x_int_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -734,7 +735,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
int i, offset;
@@ -804,235 +805,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
return false;
}
-static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
- u8 storm, u16 index, u8 op, u8 update)
-{
- u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
- COMMAND_REG_INT_ACK);
- struct igu_ack_register igu_ack;
-
- igu_ack.status_block_index = index;
- igu_ack.sb_id_and_flags =
- ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
- (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
- (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
- (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
-
- DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
- (*(u32 *)&igu_ack), hc_addr);
- REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
-
- /* Make sure that ACK is written */
- mmiowb();
- barrier();
-}
-
-static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
-{
- struct host_status_block *fpsb = fp->status_blk;
-
- barrier(); /* status block is written to by the chip */
- fp->fp_c_idx = fpsb->c_status_block.status_block_index;
- fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-}
-
-static u16 bnx2x_ack_int(struct bnx2x *bp)
-{
- u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
- COMMAND_REG_SIMD_MASK);
- u32 result = REG_RD(bp, hc_addr);
-
- DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
- result, hc_addr);
-
- return result;
-}
-
-
-/*
- * fast path service functions
- */
-
-static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
-{
- /* Tell compiler that consumer and producer can change */
- barrier();
- return (fp->tx_pkt_prod != fp->tx_pkt_cons);
-}
-
-/* free skb in the packet ring at pos idx
- * return idx of last bd freed
- */
-static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 idx)
-{
- struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
- struct eth_tx_start_bd *tx_start_bd;
- struct eth_tx_bd *tx_data_bd;
- struct sk_buff *skb = tx_buf->skb;
- u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
- int nbd;
-
- /* prefetch skb end pointer to speedup dev_kfree_skb() */
- prefetch(&skb->end);
-
- DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
- idx, tx_buf, skb);
-
- /* unmap first bd */
- DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
- tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
-
- nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
-#ifdef BNX2X_STOP_ON_ERROR
- if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
- BNX2X_ERR("BAD nbd!\n");
- bnx2x_panic();
- }
-#endif
- new_cons = nbd + tx_buf->first_bd;
-
- /* Get the next bd */
- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
- /* Skip a parse bd... */
- --nbd;
- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
- /* ...and the TSO split header bd since they have no mapping */
- if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
- --nbd;
- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- }
-
- /* now free frags */
- while (nbd > 0) {
-
- DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
- tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
- dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
- if (--nbd)
- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- }
-
- /* release skb */
- WARN_ON(!skb);
- dev_kfree_skb(skb);
- tx_buf->first_bd = 0;
- tx_buf->skb = NULL;
-
- return new_cons;
-}
-
-static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
-{
- s16 used;
- u16 prod;
- u16 cons;
-
- prod = fp->tx_bd_prod;
- cons = fp->tx_bd_cons;
-
- /* NUM_TX_RINGS = number of "next-page" entries
- It will be used as a threshold */
- used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
-
-#ifdef BNX2X_STOP_ON_ERROR
- WARN_ON(used < 0);
- WARN_ON(used > fp->bp->tx_ring_size);
- WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
-#endif
-
- return (s16)(fp->bp->tx_ring_size) - used;
-}
-
-static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
-{
- u16 hw_cons;
-
- /* Tell compiler that status block fields can change */
- barrier();
- hw_cons = le16_to_cpu(*fp->tx_cons_sb);
- return hw_cons != fp->tx_pkt_cons;
-}
-
-static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
- struct netdev_queue *txq;
- u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-
-#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
- return -1;
-#endif
-
- txq = netdev_get_tx_queue(bp->dev, fp->index);
- hw_cons = le16_to_cpu(*fp->tx_cons_sb);
- sw_cons = fp->tx_pkt_cons;
-
- while (sw_cons != hw_cons) {
- u16 pkt_cons;
-
- pkt_cons = TX_BD(sw_cons);
-
- /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
-
- DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
- hw_cons, sw_cons, pkt_cons);
-
-/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
- rmb();
- prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
- }
-*/
- bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
- sw_cons++;
- }
-
- fp->tx_pkt_cons = sw_cons;
- fp->tx_bd_cons = bd_cons;
-
- /* Need to make the tx_bd_cons update visible to start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that
- * start_xmit() will miss it and cause the queue to be stopped
- * forever.
- */
- smp_mb();
-
- /* TBD need a thresh? */
- if (unlikely(netif_tx_queue_stopped(txq))) {
- /* Taking tx_lock() is needed to prevent reenabling the queue
- * while it's empty. This could have happen if rx_action() gets
- * suspended in bnx2x_tx_int() after the condition before
- * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
- *
- * stops the queue->sees fresh tx_bd_cons->releases the queue->
- * sends some packets consuming the whole queue again->
- * stops the queue
- */
-
- __netif_tx_lock(txq, smp_processor_id());
-
- if ((netif_tx_queue_stopped(txq)) &&
- (bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
- netif_tx_wake_queue(txq);
-
- __netif_tx_unlock(txq);
- }
- return 0;
-}
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
-static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
+void bnx2x_sp_event(struct bnx2x_fastpath *fp,
union eth_rx_cqe *rr_cqe)
{
struct bnx2x *bp = fp->bp;
@@ -1116,717 +894,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
mb(); /* force bnx2x_wait_ramrod() to see the change */
}
-static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
-{
- struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
- struct page *page = sw_buf->page;
- struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-
- /* Skip "next page" elements */
- if (!page)
- return;
-
- dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
-
- sw_buf->page = NULL;
- sge->addr_hi = 0;
- sge->addr_lo = 0;
-}
-
-static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, int last)
-{
- int i;
-
- for (i = 0; i < last; i++)
- bnx2x_free_rx_sge(bp, fp, i);
-}
-
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
-{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
- struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
- struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
- dma_addr_t mapping;
-
- if (unlikely(page == NULL))
- return -ENOMEM;
-
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
- return -ENOMEM;
- }
-
- sw_buf->page = page;
- dma_unmap_addr_set(sw_buf, mapping, mapping);
-
- sge->addr_hi = cpu_to_le32(U64_HI(mapping));
- sge->addr_lo = cpu_to_le32(U64_LO(mapping));
-
- return 0;
-}
-
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
-{
- struct sk_buff *skb;
- struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
- struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
- dma_addr_t mapping;
-
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
- if (unlikely(skb == NULL))
- return -ENOMEM;
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- rx_buf->skb = skb;
- dma_unmap_addr_set(rx_buf, mapping, mapping);
-
- rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
- rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
- return 0;
-}
-
-/* note that we are not allocating a new skb,
- * we are just moving one from cons to prod
- * we are not creating a new mapping,
- * so there is no need to check for dma_mapping_error().
- */
-static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
- struct sk_buff *skb, u16 cons, u16 prod)
-{
- struct bnx2x *bp = fp->bp;
- struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
- struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
- struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
- struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
-
- dma_sync_single_for_device(&bp->pdev->dev,
- dma_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, DMA_FROM_DEVICE);
-
- prod_rx_buf->skb = cons_rx_buf->skb;
- dma_unmap_addr_set(prod_rx_buf, mapping,
- dma_unmap_addr(cons_rx_buf, mapping));
- *prod_bd = *cons_bd;
-}
-
-static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
- u16 idx)
-{
- u16 last_max = fp->last_max_sge;
-
- if (SUB_S16(idx, last_max) > 0)
- fp->last_max_sge = idx;
-}
-
-static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
-{
- int i, j;
-
- for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
- int idx = RX_SGE_CNT * i - 1;
-
- for (j = 0; j < 2; j++) {
- SGE_MASK_CLEAR_BIT(fp, idx);
- idx--;
- }
- }
-}
-
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
- struct eth_fast_path_rx_cqe *fp_cqe)
-{
- struct bnx2x *bp = fp->bp;
- u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
- le16_to_cpu(fp_cqe->len_on_bd)) >>
- SGE_PAGE_SHIFT;
- u16 last_max, last_elem, first_elem;
- u16 delta = 0;
- u16 i;
-
- if (!sge_len)
- return;
-
- /* First mark all used pages */
- for (i = 0; i < sge_len; i++)
- SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
-
- DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
-
- /* Here we assume that the last SGE index is the biggest */
- prefetch((void *)(fp->sge_mask));
- bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
-
- last_max = RX_SGE(fp->last_max_sge);
- last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
- first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
-
- /* If ring is not full */
- if (last_elem + 1 != first_elem)
- last_elem++;
-
- /* Now update the prod */
- for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
- if (likely(fp->sge_mask[i]))
- break;
-
- fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
- delta += RX_SGE_MASK_ELEM_SZ;
- }
-
- if (delta > 0) {
- fp->rx_sge_prod += delta;
- /* clear page-end entries */
- bnx2x_clear_sge_mask_next_elems(fp);
- }
-
- DP(NETIF_MSG_RX_STATUS,
- "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
- fp->last_max_sge, fp->rx_sge_prod);
-}
-
-static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
-{
- /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
-
- /* Clear the two last indices in the page to 1:
- these are the indices that correspond to the "next" element,
- hence will never be indicated and should be removed from
- the calculations. */
- bnx2x_clear_sge_mask_next_elems(fp);
-}
-
-static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod)
-{
- struct bnx2x *bp = fp->bp;
- struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
- struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
- struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- dma_addr_t mapping;
-
- /* move empty skb from pool to prod and map it */
- prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, DMA_FROM_DEVICE);
- dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
- /* move partial skb from cons to pool (don't unmap yet) */
- fp->tpa_pool[queue] = *cons_rx_buf;
-
- /* mark bin state as start - print error if current state != stop */
- if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
- BNX2X_ERR("start of bin not in stop [%d]\n", queue);
-
- fp->tpa_state[queue] = BNX2X_TPA_START;
-
- /* point prod_bd to new skb */
- prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
- prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-#ifdef BNX2X_STOP_ON_ERROR
- fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
- DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
- DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
- fp->tpa_queue_used);
-#endif
-}
-
-static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- struct sk_buff *skb,
- struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
-{
- struct sw_rx_page *rx_pg, old_rx_pg;
- u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
- u32 i, frag_len, frag_size, pages;
- int err;
- int j;
-
- frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
- pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
-
- /* This is needed in order to enable forwarding support */
- if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
-
-#ifdef BNX2X_STOP_ON_ERROR
- if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
- BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
- pages, cqe_idx);
- BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
- fp_cqe->pkt_len, len_on_bd);
- bnx2x_panic();
- return -EINVAL;
- }
-#endif
-
- /* Run through the SGL and compose the fragmented skb */
- for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
- u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
-
- /* FW gives the indices of the SGE as if the ring is an array
- (meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
- rx_pg = &fp->rx_page_ring[sge_idx];
- old_rx_pg = *rx_pg;
-
- /* If we fail to allocate a substitute page, we simply stop
- where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
- if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
- return err;
- }
-
- /* Unmap the page as we r going to pass it to the stack */
- dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
- /* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
-
- skb->data_len += frag_len;
- skb->truesize += frag_len;
- skb->len += frag_len;
-
- frag_size -= frag_len;
- }
-
- return 0;
-}
-
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, int pad, int len, union eth_rx_cqe *cqe,
- u16 cqe_idx)
-{
- struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
- struct sk_buff *skb = rx_buf->skb;
- /* alloc new skb */
- struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-
- /* Unmap skb in the pool anyway, as we are going to change
- pool entry status to BNX2X_TPA_STOP even if new skb allocation
- fails. */
- dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
-
- if (likely(new_skb)) {
- /* fix ip xsum and give it to the stack */
- /* (no need to map the new skb) */
-#ifdef BCM_VLAN
- int is_vlan_cqe =
- (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
- PARSING_FLAGS_VLAN);
- int is_not_hwaccel_vlan_cqe =
- (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
-#endif
-
- prefetch(skb);
- prefetch(((char *)(skb)) + 128);
-
-#ifdef BNX2X_STOP_ON_ERROR
- if (pad + len > bp->rx_buf_size) {
- BNX2X_ERR("skb_put is about to fail... "
- "pad %d len %d rx_buf_size %d\n",
- pad, len, bp->rx_buf_size);
- bnx2x_panic();
- return;
- }
-#endif
-
- skb_reserve(skb, pad);
- skb_put(skb, len);
-
- skb->protocol = eth_type_trans(skb, bp->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- {
- struct iphdr *iph;
-
- iph = (struct iphdr *)skb->data;
-#ifdef BCM_VLAN
- /* If there is no Rx VLAN offloading -
- take VLAN tag into an account */
- if (unlikely(is_not_hwaccel_vlan_cqe))
- iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
-#endif
- iph->check = 0;
- iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
- }
-
- if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
-#ifdef BCM_VLAN
- if ((bp->vlgrp != NULL) && is_vlan_cqe &&
- (!is_not_hwaccel_vlan_cqe))
- vlan_gro_receive(&fp->napi, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag), skb);
- else
-#endif
- napi_gro_receive(&fp->napi, skb);
- } else {
- DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
- " - dropping packet!\n");
- dev_kfree_skb(skb);
- }
-
-
- /* put new skb in bin */
- fp->tpa_pool[queue].skb = new_skb;
-
- } else {
- /* else drop the packet and keep the buffer in the bin */
- DP(NETIF_MSG_RX_STATUS,
- "Failed to allocate new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- }
-
- fp->tpa_state[queue] = BNX2X_TPA_STOP;
-}
-
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
- struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod,
- u16 rx_sge_prod)
-{
- struct ustorm_eth_rx_producers rx_prods = {0};
- int i;
-
- /* Update producers */
- rx_prods.bd_prod = bd_prod;
- rx_prods.cqe_prod = rx_comp_prod;
- rx_prods.sge_prod = rx_sge_prod;
-
- /*
- * Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- * This is only applicable for weak-ordered memory model archs such
- * as IA-64. The following barrier is also mandatory since FW will
- * assumes BDs must have buffers.
- */
- wmb();
-
- for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
- ((u32 *)&rx_prods)[i]);
-
- mmiowb(); /* keep prod updates ordered */
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
- fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
- struct sk_buff *skb)
-{
- /* Set Toeplitz hash from CQE */
- if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->fast_path_cqe.status_flags &
- ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
- skb->rxhash =
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
-
-static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
-{
- struct bnx2x *bp = fp->bp;
- u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
- u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
- int rx_pkt = 0;
-
-#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
- return 0;
-#endif
-
- /* CQ "next element" is of the size of the regular element,
- that's why it's ok here */
- hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
- if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- hw_comp_cons++;
-
- bd_cons = fp->rx_bd_cons;
- bd_prod = fp->rx_bd_prod;
- bd_prod_fw = bd_prod;
- sw_comp_cons = fp->rx_comp_cons;
- sw_comp_prod = fp->rx_comp_prod;
-
- /* Memory barrier necessary as speculative reads of the rx
- * buffer can be ahead of the index in the status block
- */
- rmb();
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
- fp->index, hw_comp_cons, sw_comp_cons);
-
- while (sw_comp_cons != hw_comp_cons) {
- struct sw_rx_bd *rx_buf = NULL;
- struct sk_buff *skb;
- union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
- u16 len, pad;
-
- comp_ring_cons = RCQ_BD(sw_comp_cons);
- bd_prod = RX_BD(bd_prod);
- bd_cons = RX_BD(bd_cons);
-
- /* Prefetch the page containing the BD descriptor
- at producer's index. It will be needed when new skb is
- allocated */
- prefetch((void *)(PAGE_ALIGN((unsigned long)
- (&fp->rx_desc_ring[bd_prod])) -
- PAGE_SIZE + 1));
-
- cqe = &fp->rx_comp_ring[comp_ring_cons];
- cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-
- DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
- " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
- cqe_fp_flags, cqe->fast_path_cqe.status_flags,
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
- le16_to_cpu(cqe->fast_path_cqe.pkt_len));
-
- /* is this a slowpath msg? */
- if (unlikely(CQE_TYPE(cqe_fp_flags))) {
- bnx2x_sp_event(fp, cqe);
- goto next_cqe;
-
- /* this is an rx packet */
- } else {
- rx_buf = &fp->rx_buf_ring[bd_cons];
- skb = rx_buf->skb;
- prefetch(skb);
- len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
- pad = cqe->fast_path_cqe.placement_offset;
-
- /* If CQE is marked both TPA_START and TPA_END
- it is a non-TPA CQE */
- if ((!fp->disable_tpa) &&
- (TPA_TYPE(cqe_fp_flags) !=
- (TPA_TYPE_START | TPA_TYPE_END))) {
- u16 queue = cqe->fast_path_cqe.queue_index;
-
- if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_start on queue %d\n",
- queue);
-
- bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod);
-
- /* Set Toeplitz hash for an LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- goto next_rx;
- }
-
- if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
-
- if (!BNX2X_RX_SUM_FIX(cqe))
- BNX2X_ERR("STOP on none TCP "
- "data\n");
-
- /* This is a size of the linear data
- on this skb */
- len = le16_to_cpu(cqe->fast_path_cqe.
- len_on_bd);
- bnx2x_tpa_stop(bp, fp, queue, pad,
- len, cqe, comp_ring_cons);
-#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
-#endif
-
- bnx2x_update_sge_prod(fp,
- &cqe->fast_path_cqe);
- goto next_cqe;
- }
- }
-
- dma_sync_single_for_device(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + 128);
-
- /* is this an error packet? */
- if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR flags %x rx packet %u\n",
- cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
- goto reuse_rx;
- }
-
- /* Since we don't have a jumbo ring
- * copy small packets if mtu > 1500
- */
- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
- (len <= RX_COPY_THRESH)) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev,
- len + pad);
- if (new_skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped "
- "because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- goto reuse_rx;
- }
-
- /* aligned copy */
- skb_copy_from_linear_data_offset(skb, pad,
- new_skb->data + pad, len);
- skb_reserve(new_skb, pad);
- skb_put(new_skb, len);
-
- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
-
- skb = new_skb;
-
- } else
- if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
- dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size,
- DMA_FROM_DEVICE);
- skb_reserve(skb, pad);
- skb_put(skb, len);
-
- } else {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped because "
- "of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
-reuse_rx:
- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
- goto next_rx;
- }
-
- skb->protocol = eth_type_trans(skb, bp->dev);
-
- /* Set Toeplitz hash for a none-LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- skb->ip_summed = CHECKSUM_NONE;
- if (bp->rx_csum) {
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
- }
-
- skb_record_rx_queue(skb, fp->index);
-
-#ifdef BCM_VLAN
- if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
- (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
- PARSING_FLAGS_VLAN))
- vlan_gro_receive(&fp->napi, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
- else
-#endif
- napi_gro_receive(&fp->napi, skb);
-
-
-next_rx:
- rx_buf->skb = NULL;
-
- bd_cons = NEXT_RX_IDX(bd_cons);
- bd_prod = NEXT_RX_IDX(bd_prod);
- bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
- rx_pkt++;
-next_cqe:
- sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
- sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
-
- if (rx_pkt == budget)
- break;
- } /* while */
-
- fp->rx_bd_cons = bd_cons;
- fp->rx_bd_prod = bd_prod_fw;
- fp->rx_comp_cons = sw_comp_cons;
- fp->rx_comp_prod = sw_comp_prod;
-
- /* Update producers */
- bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
- fp->rx_sge_prod);
-
- fp->rx_pkt += rx_pkt;
- fp->rx_calls++;
-
- return rx_pkt;
-}
-
-static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
-{
- struct bnx2x_fastpath *fp = fp_cookie;
- struct bnx2x *bp = fp->bp;
-
- /* Return here if interrupt is disabled */
- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
- return IRQ_HANDLED;
- }
-
- DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
- fp->index, fp->sb_id);
- bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
-
-#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
- return IRQ_HANDLED;
-#endif
-
- /* Handle Rx and Tx according to MSI-X vector */
- prefetch(fp->rx_cons_sb);
- prefetch(fp->tx_cons_sb);
- prefetch(&fp->status_blk->u_status_block.status_block_index);
- prefetch(&fp->status_blk->c_status_block.status_block_index);
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
u16 status = bnx2x_ack_int(bp);
@@ -1900,7 +968,6 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* end of fast path */
-static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/* Link */
@@ -1908,7 +975,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
* General service functions
*/
-static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
@@ -1953,7 +1020,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
return -EAGAIN;
}
-static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
@@ -1989,22 +1056,6 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
return 0;
}
-/* HW Lock for shared dual port PHYs */
-static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
-{
- mutex_lock(&bp->port.phy_mutex);
-
- if (bp->port.need_hw_lock)
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
-}
-
-static void bnx2x_release_phy_lock(struct bnx2x *bp)
-{
- if (bp->port.need_hw_lock)
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
-
- mutex_unlock(&bp->port.phy_mutex);
-}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
@@ -2181,7 +1232,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return 0;
}
-static void bnx2x_calc_fc_adv(struct bnx2x *bp)
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
switch (bp->link_vars.ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
@@ -2206,58 +1257,8 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
}
}
-static void bnx2x_link_report(struct bnx2x *bp)
-{
- if (bp->flags & MF_FUNC_DIS) {
- netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
- return;
- }
-
- if (bp->link_vars.link_up) {
- u16 line_speed;
-
- if (bp->state == BNX2X_STATE_OPEN)
- netif_carrier_on(bp->dev);
- netdev_info(bp->dev, "NIC Link is Up, ");
-
- line_speed = bp->link_vars.line_speed;
- if (IS_E1HMF(bp)) {
- u16 vn_max_rate;
-
- vn_max_rate =
- ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
- if (vn_max_rate < line_speed)
- line_speed = vn_max_rate;
- }
- pr_cont("%d Mbps ", line_speed);
-
- if (bp->link_vars.duplex == DUPLEX_FULL)
- pr_cont("full duplex");
- else
- pr_cont("half duplex");
-
- if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
- pr_cont(", receive ");
- if (bp->link_vars.flow_ctrl &
- BNX2X_FLOW_CTRL_TX)
- pr_cont("& transmit ");
- } else {
- pr_cont(", transmit ");
- }
- pr_cont("flow control ON");
- }
- pr_cont("\n");
-
- } else { /* link_down */
- netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
- }
-}
-static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
if (!BP_NOMCP(bp)) {
u8 rc;
@@ -2292,7 +1293,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
return -EINVAL;
}
-static void bnx2x_link_set(struct bnx2x *bp)
+void bnx2x_link_set(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
@@ -2314,7 +1315,7 @@ static void bnx2x__link_reset(struct bnx2x *bp)
BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
-static u8 bnx2x_link_test(struct bnx2x *bp)
+u8 bnx2x_link_test(struct bnx2x *bp)
{
u8 rc = 0;
@@ -2546,7 +1547,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
}
}
-static void bnx2x__link_status_update(struct bnx2x *bp)
+void bnx2x__link_status_update(struct bnx2x *bp)
{
if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
return;
@@ -2627,9 +1628,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
-static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
-static void bnx2x_set_rx_mode(struct net_device *dev);
-
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -2757,7 +1755,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
}
/* the slow path queue is odd since completions arrive on the fastpath ring */
-static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common)
{
struct eth_spe *spe;
@@ -3169,10 +2167,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
-
-
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
@@ -3206,7 +2200,7 @@ static inline void bnx2x_set_reset_i