author      Jesse Brandeburg <jesse.brandeburg@intel.com>    2018-09-14 17:37:55 -0700
committer   Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2018-09-18 15:32:29 -0700
commit      56184e01c00d6d23609f9f9e52cc731568e8088f (patch)
tree        922ef08cc5806e1d74ff82d95167ff694c9ed4bd    /drivers/net/ethernet/intel/iavf/iavf_txrx.c
parent      ad64ed8bf908d7d8261256031039b1589386c609 (diff)
iavf: rename most of i40e strings
This is the big rename patch: it takes most of the i40e_ and I40E_ strings and renames them to iavf_ and IAVF_. Some of the adminq code, as well as most of the client interface code used by RDMA, is left unchanged to indicate that the driver is talking to code that is not internal to iavf.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
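For context on what the renamed symbols in the first hunk do, here is a minimal, standalone sketch (not part of the patch) of how a build_ctob()-style helper packs the command, offset, buffer size, and L2 tag fields into a Tx descriptor quad-word. The EX_* names and shift values are assumptions made up for this example; the driver's authoritative values are the renamed IAVF_TXD_QW1_*_SHIFT defines, and the real helper returns a little-endian __le64 via cpu_to_le64() rather than a host-order value.

/* Standalone illustration only -- the EX_* values are assumptions for this
 * sketch, not the driver's defines.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_TXD_QW1_DTYPE_DATA       0x0ULL  /* descriptor type: data */
#define EX_TXD_QW1_CMD_SHIFT        4       /* command bits (e.g. EOP, RS) */
#define EX_TXD_QW1_OFFSET_SHIFT     16      /* header offsets */
#define EX_TXD_QW1_TX_BUF_SZ_SHIFT  34      /* buffer length */
#define EX_TXD_QW1_L2TAG1_SHIFT     48      /* VLAN tag */

/* Pack the per-descriptor fields into one 64-bit word; each field lives in
 * its own bit range so the hardware can decode them independently.
 */
static uint64_t ex_build_ctob(uint32_t td_cmd, uint32_t td_offset,
                              unsigned int size, uint32_t td_tag)
{
        return EX_TXD_QW1_DTYPE_DATA |
               ((uint64_t)td_cmd << EX_TXD_QW1_CMD_SHIFT) |
               ((uint64_t)td_offset << EX_TXD_QW1_OFFSET_SHIFT) |
               ((uint64_t)size << EX_TXD_QW1_TX_BUF_SZ_SHIFT) |
               ((uint64_t)td_tag << EX_TXD_QW1_L2TAG1_SHIFT);
}

int main(void)
{
        /* example: a two-bit command, no offsets, 1514-byte frame, no VLAN tag */
        printf("qw1 = 0x%016llx\n",
               (unsigned long long)ex_build_ctob(0x3, 0, 1514, 0));
        return 0;
}

The patch below leaves this packing logic untouched; only the symbol prefixes change from I40E_ to IAVF_.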
Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_txrx.c')
-rw-r--r--   drivers/net/ethernet/intel/iavf/iavf_txrx.c   616
1 file changed, 308 insertions, 308 deletions
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 7a33a783b80c..1768c64a922f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -12,24 +12,24 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
- ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
- ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
- ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
- ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+ ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}
-#define I40E_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
+#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
/**
- * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * iavf_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
+ struct iavf_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
@@ -55,7 +55,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
* iavf_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
**/
-void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
+void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
unsigned long bi_size;
u16 i;
@@ -66,9 +66,9 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++)
- i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+ iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
- bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
/* Zero out the descriptor ring */
@@ -90,7 +90,7 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
*
* Free all transmit software resources
**/
-void iavf_free_tx_resources(struct i40e_ring *tx_ring)
+void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
iavf_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
@@ -111,7 +111,7 @@ void iavf_free_tx_resources(struct i40e_ring *tx_ring)
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
@@ -132,9 +132,9 @@ u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
* VSI has netdev and netdev has TX queues. This function is to check each of
* those TX queues if they are hung, trigger recovery by issuing SW interrupt.
**/
-void iavf_detect_recover_hung(struct i40e_vsi *vsi)
+void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
- struct i40e_ring *tx_ring = NULL;
+ struct iavf_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
int packets;
@@ -142,7 +142,7 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)
if (!vsi)
return;
- if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ if (test_bit(__IAVF_VSI_DOWN, vsi->state))
return;
netdev = vsi->netdev;
@@ -181,19 +181,19 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)
#define WB_STRIDE 4
/**
- * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * iavf_clean_tx_irq - Reclaim resources after transmit completes
* @vsi: the VSI we care about
* @tx_ring: Tx ring to clean
* @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget)
+static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
+ struct iavf_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
- struct i40e_tx_buffer *tx_buf;
- struct i40e_tx_desc *tx_desc;
+ struct iavf_tx_buffer *tx_buf;
+ struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit;
@@ -202,7 +202,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
i -= tx_ring->count;
do {
- struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+ struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -286,7 +286,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
@@ -296,8 +296,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_VSI_DOWN, vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
+ (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
- (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_VSI_DOWN, vsi->state)) {
+ !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -330,13 +330,13 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* @q_vector: the vector on which to enable writeback
*
**/
-static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
- struct i40e_q_vector *q_vector)
+static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
+ struct iavf_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
u32 val;
- if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+ if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
return;
if (q_vector->arm_wb_state)
@@ -356,7 +356,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
* @q_vector: the vector on which to force writeback
*
**/
-void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
@@ -369,31 +369,31 @@ void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
val);
}
-static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
- struct i40e_ring_container *rc)
+static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
return &q_vector->rx == rc;
}
-static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
{
unsigned int divisor;
switch (q_vector->adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break;
case I40E_LINK_SPEED_25GB:
case I40E_LINK_SPEED_20GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break;
default:
case I40E_LINK_SPEED_10GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break;
case I40E_LINK_SPEED_1GB:
case I40E_LINK_SPEED_100MB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break;
}
@@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
}
/**
- * i40e_update_itr - update the dynamic ITR value based on statistics
+ * iavf_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
* @rc: structure containing ring performance data
*
@@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_update_itr(struct i40e_q_vector *q_vector,
- struct i40e_ring_container *rc)
+static void iavf_update_itr(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
unsigned int avg_wire_size, packets, bytes, itr;
unsigned long next_update = jiffies;
@@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
/* For Rx we want to push the delay up and default to low latency.
* for Tx we want to pull the delay down and default to high latency.
*/
- itr = i40e_container_is_rx(q_vector, rc) ?
- I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
- I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+ itr = iavf_container_is_rx(q_vector, rc) ?
+ IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
+ IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
/* If we didn't update within up to 1 - 2 jiffies we can assume
* that either packets are coming in so slow there hasn't been
@@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
packets = rc->total_packets;
bytes = rc->total_bytes;
- if (i40e_container_is_rx(q_vector, rc)) {
+ if (iavf_container_is_rx(q_vector, rc)) {
/* If Rx there are 1 to 4 packets and bytes are less than
* 9000 assume insufficient data to use bulk rate limiting
* approach unless Tx is already in bulk rate limiting. We
* are likely latency driven.
*/
if (packets && packets < 4 && bytes < 9000 &&
- (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
- itr = I40E_ITR_ADAPTIVE_LATENCY;
+ (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
+ itr = IAVF_ITR_ADAPTIVE_LATENCY;
goto adjust_by_size;
}
} else if (packets < 4) {
@@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
* that the Rx can relax.
*/
- if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
- (q_vector->rx.target_itr & I40E_ITR_MASK) ==
- I40E_ITR_ADAPTIVE_MAX_USECS)
+ if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
+ IAVF_ITR_ADAPTIVE_MAX_USECS)
goto clear_counts;
} else if (packets > 32) {
/* If we have processed over 32 packets in a single interrupt
* for Tx assume we need to switch over to "bulk" mode.
*/
- rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+ rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
}
/* We have no packets to actually measure against. This means
@@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* fixed amount.
*/
if (packets < 56) {
- itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
- if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
- itr &= I40E_ITR_ADAPTIVE_LATENCY;
- itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= IAVF_ITR_ADAPTIVE_LATENCY;
+ itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
}
goto clear_counts;
}
if (packets <= 256) {
itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
- itr &= I40E_ITR_MASK;
+ itr &= IAVF_ITR_MASK;
/* Between 56 and 112 is our "goldilocks" zone where we are
* working out "just right". Just report that our current
@@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* in half per interrupt.
*/
itr /= 2;
- itr &= I40E_ITR_MASK;
- if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
- itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+ itr &= IAVF_ITR_MASK;
+ if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
+ itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
goto clear_counts;
}
@@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* though for smaller packet sizes there isn't much we can do as
* NAPI polling will likely be kicking in sooner rather than later.
*/
- itr = I40E_ITR_ADAPTIVE_BULK;
+ itr = IAVF_ITR_ADAPTIVE_BULK;
adjust_by_size:
/* If packet counts are 256 or greater we can assume we have a gross
@@ -577,7 +577,7 @@ adjust_by_size:
/* If we are in low latency mode halve our delay which doubles the
* rate to somewhere between 100K to 16K ints/sec
*/
- if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+ if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
avg_wire_size /= 2;
/* Resultant value is 256 times larger than it needs to be. This
@@ -587,12 +587,12 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
- I40E_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
- if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
- itr &= I40E_ITR_ADAPTIVE_LATENCY;
- itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= IAVF_ITR_ADAPTIVE_LATENCY;
+ itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
}
clear_counts:
@@ -612,7 +612,7 @@ clear_counts:
*
* Return 0 on success, negative on error
**/
-int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int bi_size;
@@ -622,13 +622,13 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_bi);
- bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi)
goto err;
/* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -653,7 +653,7 @@ err:
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
-void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
+void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
unsigned long bi_size;
u16 i;
@@ -669,7 +669,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
if (!rx_bi->page)
continue;
@@ -685,9 +685,9 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- i40e_rx_pg_size(rx_ring),
+ iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- I40E_RX_DMA_ATTR);
+ IAVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
@@ -695,7 +695,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
rx_bi->page_offset = 0;
}
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_bi, 0, bi_size);
/* Zero out the descriptor ring */
@@ -712,7 +712,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
*
* Free all receive software resources
**/
-void iavf_free_rx_resources(struct i40e_ring *rx_ring)
+void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_bi);
@@ -731,14 +731,14 @@ void iavf_free_rx_resources(struct i40e_ring *rx_ring)
*
* Returns 0 on success, negative on failure
**/
-int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int bi_size;
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi)
goto err;
@@ -746,7 +746,7 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -769,11 +769,11 @@ err:
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
+ * iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
**/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
@@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_rx_offset - Return expected offset into page to access data
+ * iavf_rx_offset - Return expected offset into page to access data
* @rx_ring: Ring we are requesting offset of
*
* Returns the offset value for ring into the data buffer.
*/
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
- return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+ return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}
/**
- * i40e_alloc_mapped_page - recycle or make a new page
+ * iavf_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
*
* Returns true if the page was successfully allocated or
* reused.
**/
-static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
+static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
+ page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
@@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- i40e_rx_pg_size(rx_ring),
+ iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- I40E_RX_DMA_ATTR);
+ IAVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, i40e_rx_pg_order(rx_ring));
+ __free_pages(page, iavf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = i40e_rx_offset(rx_ring);
+ bi->page_offset = iavf_rx_offset(rx_ring);
/* initialize pagecnt_bias to 1 representing we fully own page */
bi->pagecnt_bias = 1;
@@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
+ * iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
**/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
+static void iavf_receive_skb(struct iavf_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct iavf_q_vector *q_vector = rx_ring->q_vector;
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -877,11 +877,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
*
* Returns false if all allocations were successful, true if any fail
**/
-bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
u16 ntu = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
+ union iavf_rx_desc *rx_desc;
+ struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -891,7 +891,7 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_bi[ntu];
do {
- if (!i40e_alloc_mapped_page(rx_ring, bi))
+ if (!iavf_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
/* sync the buffer for use by the device */
@@ -921,13 +921,13 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
} while (cleaned_count);
if (rx_ring->next_to_use != ntu)
- i40e_release_rx_desc(rx_ring, ntu);
+ iavf_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
if (rx_ring->next_to_use != ntu)
- i40e_release_rx_desc(rx_ring, ntu);
+ iavf_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -936,27 +936,27 @@ no_buffers:
}
/**
- * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
**/
-static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb,
- union i40e_rx_desc *rx_desc)
+ union iavf_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct iavf_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
+ IAVF_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -975,10 +975,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (!(decoded.known && decoded.outer_ip))
return;
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
if (ipv4 &&
(rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1004,9 +1004,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ case IAVF_RX_PTYPE_INNER_PROT_TCP:
+ case IAVF_RX_PTYPE_INNER_PROT_UDP:
+ case IAVF_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* fall though */
default:
@@ -1020,37 +1020,37 @@ checksum_fail:
}
/**
- * i40e_ptype_to_htype - get a hash type
+ * iavf_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline int i40e_ptype_to_htype(u8 ptype)
+static inline int iavf_ptype_to_htype(u8 ptype)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
if (!decoded.known)
return PKT_HASH_TYPE_NONE;
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
return PKT_HASH_TYPE_L3;
else
return PKT_HASH_TYPE_L2;
}
/**
- * i40e_rx_hash - set the hash value in the skb
+ * iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: skb currently being received and modified
* @rx_ptype: Rx packet type
**/
-static inline void i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc,
+static inline void iavf_rx_hash(struct iavf_ring *ring,
+ union iavf_rx_desc *rx_desc,
struct sk_buff *skb,
u8 rx_ptype)
{
@@ -1064,7 +1064,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
}
}
@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
static inline
-void iavf_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+void iavf_process_skb_fields(struct iavf_ring *rx_ring,
+ union iavf_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype)
{
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1095,7 +1095,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_cleanup_headers - Correct empty headers
+ * iavf_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
*
@@ -1107,7 +1107,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
@@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
}
/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * iavf_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
**/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_buff)
+static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *old_buff)
{
- struct i40e_rx_buffer *new_buff;
+ struct iavf_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_bi[nta];
@@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reusable - check if any reuse is possible
+ * iavf_page_is_reusable - check if any reuse is possible
* @page: page struct to check
*
* A page is not reusable if it was allocated under low memory
* conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reusable(struct page *page)
+static inline bool iavf_page_is_reusable(struct page *page)
{
return (page_to_nid(page) == numa_mem_id()) &&
!page_is_pfmemalloc(page);
}
/**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * iavf_can_reuse_rx_page - Determine if this page can be reused by
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
@@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!i40e_page_is_reusable(page)))
+ if (unlikely(!iavf_page_is_reusable(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
-#define I40E_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+#define IAVF_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
+ if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
return false;
#endif
@@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
}
/**
- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
@@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
struct sk_buff *skb,
unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
}
/**
- * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
-static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
const unsigned int size)
{
- struct i40e_rx_buffer *rx_buffer;
+ struct iavf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
@@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
}
/**
- * i40e_construct_skb - Allocate skb and populate it
+ * iavf_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
* @size: size of buffer to add to skb
@@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
-static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
@@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
+ IAVF_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
/* Determine available headroom for copy */
headlen = size;
- if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+ if (headlen > IAVF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
/**
- * i40e_build_skb - Build skb around an existing buffer
+ * iavf_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
@@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
unsigned int s