author	David S. Miller <davem@davemloft.net>	2014-11-05 21:50:43 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-05 21:50:43 -0500
commit	2c99cd914d4fed9160d98849c9dd38034616768e (patch)
tree	fd6c145593ff1c901a9980765952700586d6fe20
parent	25de4668d094f00e44a8f2428dd3c1a4ecfa0053 (diff)
parent	5cdec679671741bcca1f8280699a64b42c9fa2b4 (diff)
Merge branch 'amd-xgbe-next'
Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2014-11-04

The following series of patches includes functional updates to the
driver as well as some trivial changes for function renaming and
spelling fixes.

- Move channel and ring structure allocation into the device open path
- Rename the pre_xmit function to dev_xmit
- Explicitly use the u32 data type for the device descriptors
- Use page allocation for the receive buffers
- Add support for split header/payload receive
- Add support for per DMA channel interrupts
- Add support for receive side scaling (RSS)
- Add support for ethtool receive side scaling commands
- Fix the spelling of descriptors
- After a PCS reset, sync the PCS and PHY modes
- Add dependency on HAS_IOMEM to both the amd-xgbe and amd-xgbe-phy
  drivers

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	Documentation/devicetree/bindings/net/amd-xgbe.txt	12
-rw-r--r--	drivers/net/ethernet/amd/Kconfig	2
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-common.h	42
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-desc.c	193
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-dev.c	288
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-drv.c	445
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c	82
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-main.c	86
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe.h	77
-rw-r--r--	drivers/net/phy/Kconfig	2
-rw-r--r--	drivers/net/phy/amd-xgbe-phy.c	3
11 files changed, 955 insertions, 277 deletions
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 41354f730beb..26efd526d16c 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -7,7 +7,10 @@ Required properties:
- PCS registers
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
-- interrupts: Should contain the amd-xgbe interrupt
+- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
+ listed is required and is the general device interrupt. If the optional
+ amd,per-channel-interrupt property is specified, then one additional
+ interrupt for each DMA channel supported by the device should be specified
- clocks:
- DMA clock for the amd-xgbe device (used for calculating the
correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@ Optional properties:
- mac-address: mac address to be assigned to the device. Can be overridden
by UEFI.
- dma-coherent: Present if dma operations are coherent
+- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
+ a unique interrupt for each DMA channel - this requires an additional
+ interrupt be configured for each DMA channel
Example:
xgbe@e0700000 {
@@ -30,7 +36,9 @@ Example:
reg = <0 0xe0700000 0 0x80000>,
<0 0xe0780000 0 0x80000>;
interrupt-parent = <&gic>;
- interrupts = <0 325 4>;
+ interrupts = <0 325 4>,
+ <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+ amd,per-channel-interrupt;
clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
clock-names = "dma_clk", "ptp_clk";
phy-handle = <&phy>;
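
The driver consumes this binding at probe time. What follows is a minimal
sketch of that flow, not the literal patch code (the actual parsing is
split across xgbe-main.c and the xgbe-drv.c changes later in this series;
the variable names here are illustrative):

	/* Detect the optional property; interrupt 0 remains the general
	 * device interrupt, so per-channel IRQs start at DT index 1.
	 */
	pdata->per_channel_irq = of_property_read_bool(dev->of_node,
					"amd,per-channel-interrupt");

	if (pdata->per_channel_irq) {
		for (i = 0; i < channel_count; i++) {
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0)
				return ret;	/* DT must list one IRQ per channel */

			channel_irq[i] = ret;
		}
	}
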
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8319c99331b0..7a5e4aa5415e 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on OF_NET
+ depends on OF_NET && HAS_IOMEM
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30820d5..2fe8fc71fe01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -207,6 +207,8 @@
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
@@ -306,6 +308,9 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
#define MAC_TSCR 0x0d00
#define MAC_SSIR 0x0d04
#define MAC_STSR 0x0d08
@@ -429,6 +434,8 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
@@ -445,6 +452,24 @@
#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
#define MAC_SSIR_SNSINC_INDEX 8
#define MAC_SSIR_SNSINC_WIDTH 8
#define MAC_SSIR_SSINC_INDEX 16
@@ -844,9 +869,13 @@
#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
@@ -855,14 +884,27 @@
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
#define RX_NORMAL_DESC3_LD_INDEX 28
#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_NORMAL_DESC3_OWN_INDEX 31
#define RX_NORMAL_DESC3_OWN_WIDTH 1
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
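
For reference, these _INDEX/_WIDTH pairs are never used directly; helper
macros already present in xgbe-common.h paste the register and field names
together and reduce to roughly the following, which is what makes accesses
such as XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, ch) in the
later hunks possible (simplified from the header's existing GET_BITS and
SET_BITS helpers):

#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)
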
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6fc5da01437d..e6b9f54b9697 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
kfree(ring->rdata);
ring->rdata = NULL;
}
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
if (ring->rdesc) {
dma_free_coherent(pdata->dev,
(sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ err_ring:
return ret;
}
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
- dma_addr_t rdesc_dma, skb_dma;
- struct sk_buff *skb = NULL;
+ dma_addr_t rdesc_dma;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
- break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
rdesc++;
rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +431,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
- struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
{
if (rdata->skb_dma) {
if (rdata->mapped_as_page) {
@@ -354,6 +451,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
rdata->skb = NULL;
}
+ if (rdata->rx_hdr.pa.pages)
+ put_page(rdata->rx_hdr.pa.pages);
+
+ if (rdata->rx_hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+ rdata->rx_hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx_hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx_buf.pa.pages)
+ put_page(rdata->rx_buf.pa.pages);
+
+ if (rdata->rx_buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+ rdata->rx_buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx_buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+ memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
+
rdata->tso_header = 0;
rdata->len = 0;
rdata->interrupt = 0;
@@ -494,7 +614,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
while (start_index < cur_index) {
rdata = XGBE_GET_DESC_DATA(ring, start_index++);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +622,25 @@ err_out:
return 0;
}
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct sk_buff *skb = NULL;
- dma_addr_t skb_dma;
int i;
- DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
ring->rx.realloc_index);
for (i = 0; i < ring->dirty; i++) {
rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
/* Reset rdata values */
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
- break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
hw_if->rx_desc_reset(rdata);
@@ -543,7 +648,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
}
ring->dirty = 0;
- DBGPR("<--xgbe_realloc_skb\n");
+ DBGPR("<--xgbe_realloc_rx_buffer\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +658,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
desc_if->free_ring_resources = xgbe_free_ring_resources;
desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->realloc_skb = xgbe_realloc_skb;
- desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
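
The payoff of the page-based scheme comes on the receive path, where the
split header is copied into the skb head and the payload page slice is
attached as a fragment instead of being memcpy'd. A sketch of that
consumption, assuming the rx_hdr/rx_buf fields introduced above (the real
code lands in the xgbe-drv.c portion of this series):

	struct sk_buff *skb;
	u8 *packet;

	/* Small allocation: only the split header is copied into the head */
	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(pdata->dev, rdata->rx_hdr.dma,
				rdata->rx_hdr.dma_len, DMA_FROM_DEVICE);
	packet = page_address(rdata->rx_hdr.pa.pages) +
		 rdata->rx_hdr.pa.pages_offset;
	memcpy(skb_put(skb, hdr_len), packet, hdr_len);

	/* Payload stays in place: attach the page slice as a fragment,
	 * consuming the page reference taken in xgbe_set_buffer_data()
	 */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			rdata->rx_buf.pa.pages,
			rdata->rx_buf.pa.pages_offset,
			payload_len, rdata->rx_buf.dma_len);
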
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03e8c07..7daa2cd9af5f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
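
These helpers are exported through hw_if (see the end of this file's diff)
and driven from the new ethtool support mentioned in the cover letter. The
xgbe-ethtool.c hunk is omitted from this excerpt, but the glue plausibly
reduces to something like the following sketch (the exact .set_rxfh
signature varies by kernel version, so treat the prototype as assumed):

static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *key)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;

	if (indir) {
		ret = hw_if->set_rss_lookup_table(pdata, indir);
		if (ret)
			return ret;
	}

	if (key) {
		ret = hw_if->set_rss_hash_key(pdata, key);
		if (ret)
			return ret;
	}

	return 0;
}
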
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
- * TIE - Transmit Interrupt Enable (unless polling)
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
- * RIE - Receive Interrupt Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
int i;
int start_index = ring->cur;
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Tx descriptor
- * Set buffer 1 (lo) address to zero
- * Set buffer 1 (hi) address to zero
- * Reset all other control bits (IC, TTSE, B2L & B1L)
- * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
- * etc)
- */
- rdesc->desc0 = 0;
- rdesc->desc1 = 0;
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
}
- /* Make sure everything is written to the descriptor(s) before
- * telling the device about them
- */
- wmb();
-
/* Update the total number of Tx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control bits
- * OWN and INTE
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));

- rdesc->desc3 = 0;
- if (rdata->interrupt)
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
unsigned int start_index = ring->cur;
unsigned int rx_coalesce, rx_frames;
unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control
- * bits OWN and INTE appropriateley
- */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
- rdata->interrupt = 1;
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
- /* Clear interrupt on completion bit */
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- 0);
+ /* Set interrupt on completion bit as appropriate */
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
rdata->interrupt = 0;
- }
- }
+ else
+ rdata->interrupt = 1;
- /* Make sure everything is written to the descriptors before
- * telling the device about them
- */
- wmb();
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(rdata);
+ }
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,7 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
@@ -1211,7 +1338,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
int start_index = ring->cur;
int i;
- DBGPR("-->xgbe_pre_xmit\n");
+ DBGPR("-->xgbe_dev_xmit\n");
csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE);
@@ -1410,7 +1537,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
channel->name, start_index & (ring->rdesc_count - 1),
(ring->cur - 1) & (ring->rdesc_count - 1));
- DBGPR("<--xgbe_pre_xmit\n");
+ DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1547,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
struct net_device *netdev = channel->pdata->netdev;
- unsigned int err, etlt;
+ unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1454,6 +1581,31 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
/* Get the packet length */
rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
@@ -2485,6 +2637,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2715,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
- hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
@@ -2620,5 +2774,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
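
On the other side, the RX_PACKET_ATTRIBUTES_RSS_HASH flag and hash value
that xgbe_dev_read() records above are consumed on the NAPI receive path
in xgbe-drv.c, roughly as follows (a sketch; the surrounding skb
construction is omitted):

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			   RSS_HASH))
		skb_set_hash(skb, packet->rss_hash,
			     packet->rss_hash_type);
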
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea970255..ced9f52eb45b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -126,9 +127,99 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+ int ret = -ENOMEM;
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ goto err_channel;
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the DMA interrupt (offset 1) */
+ ret = platform_get_irq(pdata->pdev, i + 1);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_mem);
+
+err_channel:
+ return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->channel)
+ return;
+
+ kfree(pdata->channel->rx_ring);
+ kfree(pdata->channel->tx_ring);
+ kfree(pdata->channel);
+
+ pdata->channel = NULL;
+ pdata->channel_count = 0;
+}
+
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
return (ring->rdesc_count - (ring->cur - ring->dirty));
@@ -144,8 +235,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
- rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
@@ -213,11 +304,7 @@ static irqr