author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-03-30 07:46:36 -0700
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-08-10 19:54:11 -0700
commit		b955f6ca776f3bab3d1e2c5fb1d247b203cbda14 (patch)
tree		88e47fa3369f95c5673d4d654ae5f432cdc79048 /drivers/net/ethernet/amd/pcnet32.c
parent		ca7a8e85262e93065b2a49dfb96a24d4a534a049 (diff)
amd: Move AMD (Lance) chipset drivers
Moves the drivers for the AMD chipsets into drivers/net/ethernet/amd/ and
makes the necessary Kconfig and Makefile changes.

The au1000 (Alchemy) driver was also moved into the same directory even
though it is not a "Lance" driver.

CC: Peter Maydell <pmaydell@chiark.greenend.org.uk>
CC: Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
CC: "Maciej W. Rozycki" <macro@linux-mips.org>
CC: Donald Becker <becker@scyld.com>
CC: Sam Creasey <sammy@users.qual.net>
CC: Miguel de Icaza <miguel@nuclecu.unam.mx>
CC: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
CC: Don Fry <pcnet32@frontier.com>
CC: Geert Uytterhoeven <geert@linux-m68k.org>
CC: Russell King <linux@arm.linux.org.uk>
CC: David Davies <davies@maniac.ultranet.com>
CC: "M.Hipp" <hippm@informatik.uni-tuebingen.de>
CC: Pete Popov <ppopov@embeddedalley.com>
CC: David Hinds <dahinds@users.sourceforge.net>
CC: "Roger C. Pao" <rpao@paonet.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/amd/pcnet32.c')
-rw-r--r--	drivers/net/ethernet/amd/pcnet32.c	2937
1 file changed, 2937 insertions, 0 deletions
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
new file mode 100644
index 000000000000..8b3090dc4bcd
--- /dev/null
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -0,0 +1,2937 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.35"
+#define DRV_RELDATE "21.Apr.2008"
+#define PFX DRV_NAME ": "
+
+static const char *const version =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
+
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static const unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
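+
+/*
+ * Example: loading the module with options=4 selects 10baseT full duplex
+ * (PCNET32_PORT_10BT | PCNET32_PORT_FD); values marked "not supported"
+ * simply fall back to auto-select.
+ */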
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+
+#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
+
+#define PCNET32_NUM_REGS 136
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * The defaults below give 16 Tx buffers and 32 Rx buffers,
+ * i.e. 4 (16 == 2^4) and 5 (32 == 2^5).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
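+
+/*
+ * The *_MAX_RING_SIZE values (512 entries) cap what may be requested at
+ * run time when the rings are resized through the ethtool ring-parameter
+ * interface (see pcnet32_set_ringparam() below).
+ */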
+
+#define PKT_BUF_SKB 1544
+/* actual buffer length after being aligned */
+#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
+/* chip wants twos complement of the (aligned) buffer length */
+#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
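+/*
+ * For example, with NET_IP_ALIGN == 2 the usable buffer is 1542 bytes and
+ * NEG_BUF_SIZE is -1542, which is stored in the 16-bit buf_length
+ * descriptor field as the two's complement value the chip expects.
+ */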
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+#define CSR0 0
+#define CSR0_INIT 0x1
+#define CSR0_START 0x2
+#define CSR0_STOP 0x4
+#define CSR0_TXPOLL 0x8
+#define CSR0_INTEN 0x40
+#define CSR0_IDON 0x0100
+#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW 1
+#define PCNET32_INIT_HIGH 2
+#define CSR3 3
+#define CSR4 4
+#define CSR5 5
+#define CSR5_SUSPEND 0x0001
+#define CSR15 15
+#define PCNET32_MC_FILTER 8
+
+#define PCNET32_79C970A 0x2621
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ __le32 base;
+ __le16 buf_length; /* two's complement of length */
+ __le16 status;
+ __le32 msg_length;
+ __le32 reserved;
+};
+
+struct pcnet32_tx_head {
+ __le32 base;
+ __le16 length; /* two's complement of length */
+ __le16 status;
+ __le32 misc;
+ __le32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ __le16 mode;
+ __le16 tlen_rlen;
+ u8 phys_addr[6];
+ __le16 reserved;
+ __le32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ __le32 rx_ring;
+ __le32 tx_ring;
+};
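+
+/*
+ * tlen_rlen holds the encoded ring lengths: log2 of the Tx ring size in
+ * bits 15-12 and log2 of the Rx ring size in bits 7-4, matching the
+ * tx_len_bits (size << 12) and rx_len_bits (size << 4) values kept in
+ * pcnet32_private below.
+ */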
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
+};
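+
+/*
+ * Two instances of this table are defined below, one using 16-bit (word)
+ * and one using 32-bit (dword) I/O; the rest of the driver goes through
+ * lp->a, e.g. lp->a.write_csr(ioaddr, CSR0, CSR0_STOP), without caring
+ * which access width was detected at probe time.
+ */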
+
+/*
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
+ */
+struct pcnet32_private {
+ struct pcnet32_init_block *init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
+ returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ struct pcnet32_access a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
+ unsigned short chip_version; /* which variant this is */
+
+ /* saved registers during ethtool blink */
+ u16 save_regs[4];
+};
+
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
+ struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev);
+static irqreturn_t pcnet32_interrupt(int, void *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_load_multicast(struct net_device *dev);
+static void pcnet32_set_multicast_list(struct net_device *);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static void pcnet32_watchdog(struct net_device *);
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
+static int pcnet32_get_regs_len(struct net_device *dev);
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr);
+static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
+static void pcnet32_free_ring(struct net_device *dev);
+static void pcnet32_check_media(struct net_device *dev, int verbose);
+
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+ return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+ outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+ inw(addr + PCNET32_WIO_RESET);
+}
+
+static int pcnet32_wio_check(unsigned long addr)
+{
+ outw(88, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP) == 88;
+}
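+
+/*
+ * Writing the known value 88 to RAP and reading it back is how probing
+ * decides whether the chip responds to word-wide I/O; pcnet32_dwio_check()
+ * below performs the same test with dword accesses.
+ */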
+
+static struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+ outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+ inl(addr + PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check(unsigned long addr)
+{
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
+}
+
+static struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
+};
+
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&lp->napi);
+ netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
+ u16 val;
+
+ netif_wake_queue(dev);
+ val = lp->a.read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a.write_csr(ioaddr, CSR3, val);
+ napi_enable(&lp->napi);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_tx_head *new_tx_ring;
+ struct sk_buff **new_skb_list;
+
+ pcnet32_purge_tx_ring(dev);
+
+ new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_tx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return;
+ }
+ memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_tx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_lists;
+ }
+
+ kfree(lp->tx_skbuff);
+ kfree(lp->tx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+ lp->tx_ring_size = (1 << size);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (size << 12);
+ lp->tx_ring = new_tx_ring;
+ lp->tx_ring_dma_addr = new_ring_dma_addr;
+ lp->tx_dma_addr = new_dma_addr_list;
+ lp->tx_skbuff = new_skb_list;
+ return;
+
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_tx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ new_tx_ring,
+ new_ring_dma_addr);
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ * alloc extra buffers
+ * free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_rx_head *new_rx_ring;
+ struct sk_buff **new_skb_list;
+ int new, overlap;
+
+ new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_rx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return;
+ }
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_rx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_lists;
+ }
+
+ /* first copy the current receive buffers */
+ overlap = min(size, lp->rx_ring_size);
+ for (new = 0; new < overlap; new++) {
+ new_rx_ring[new] = lp->rx_ring[new];
+ new_dma_addr_list[new] = lp->rx_dma_addr[new];
+ new_skb_list[new] = lp->rx_skbuff[new];
+ }
+ /* now allocate any new buffers needed */
+ for (; new < size; new++) {
+ struct sk_buff *rx_skbuff;
+ new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
+ /* keep the original lists and buffers */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
+ goto free_all_new;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+
+ new_dma_addr_list[new] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
+ new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ new_rx_ring[new].status = cpu_to_le16(0x8000);
+ }
+ /* and free any unneeded buffers */
+ for (; new < lp->rx_ring_size; new++) {
+ if (lp->rx_skbuff[new]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[new]);
+ }
+ }
+
+ kfree(lp->rx_skbuff);
+ kfree(lp->rx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+ lp->rx_ring_size = (1 << size);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (size << 4);
+ lp->rx_ring = new_rx_ring;
+ lp->rx_ring_dma_addr = new_ring_dma_addr;
+ lp->rx_dma_addr = new_dma_addr_list;
+ lp->rx_skbuff = new_skb_list;
+ return;
+
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
+ if (new_skb_list[new]) {
+ pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(new_skb_list[new]);
+ }
+ }
+ kfree(new_skb_list);
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_rx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ new_rx_ring,
+ new_ring_dma_addr);
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pcnet32_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
+}
+
+static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (lp->pci_dev)
+ strcpy(info->bus_info, pci_name(lp->pci_dev));
+ else
+ sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+}
+
+static u32 pcnet32_get_link(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version >= PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* can not detect link on really old chips */
+ r = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
+}
+
+static u32 pcnet32_get_msglevel(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ lp->msg_enable = value;
+}
+
+static int pcnet32_nway_reset(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ ering->tx_max_pending = TX_MAX_RING_SIZE;
+ ering->tx_pending = lp->tx_ring_size;
+ ering->rx_max_pending = RX_MAX_RING_SIZE;
+ ering->rx_pending = lp->rx_ring_size;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int size;
+ ulong ioaddr = dev->base_addr;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->tx_ring_size)
+ pcnet32_realloc_tx_ring(dev, lp, i);
+
+ size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->rx_ring_size)
+ pcnet32_realloc_rx_ring(dev, lp, i);
+
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
+
+ return 0;
+}
+
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+}
+
+static int pcnet32_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return PCNET32_TEST_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *test, u64 * data)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
+} /* end pcnet32_ethtool_test */
+
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pcnet32_access *a = &lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ __le16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
+
+ /* Reset the PCNET32 */
+ lp->a.reset(ioaddr);
+ lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr(ioaddr, 20, 2);
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ a->write_bcr(ioaddr, 32, x | 0x0002);
+
+ /* set int loopback in CSR15 */
+ x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+ lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
+
+ teststatus = cpu_to_le16(0x8000);
+ lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
+ break;
+ }
+ }
+
+ lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
+
+ for (x = 0; x < numbuffs; x++) {
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+
+clean_up:
+ *data1 = rc;
+ pcnet32_purge_tx_ring(dev);
+
+ x = a->read_csr(ioaddr, CSR15);
+ a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
+
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ a->write_bcr(ioaddr, 32, (x & ~0x0002));
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ } else {
+ pcnet32_purge_rx_ring(dev);
+ lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+} /* end pcnet32_loopback_test */
+
+static int pcnet32_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ /* Blink the led */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
+ return 0;