/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;
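
/* For reference, a sketch of how the 6-bit hash index is derived from the
 * little-endian Ethernet CRC (this mirrors the loop in set_rx_mode() below):
 *
 *	crc = ether_crc_le(ETH_ALEN, ha->addr);
 *	for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
 *		if (crc & 0x80000000)
 *			index |= 1 << bit;
 *	mc_filter[index/16] |= (1 << (index % 16));
 */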

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align.  */
static int rx_copybreak;
static int flowctrl=1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
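
/* A hypothetical module invocation combining these parameters (media[]
 * takes one comma-separated entry per unit; unlisted units autosense):
 *
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=1 rx_copybreak=256
 */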


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
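
/* Because the sizes above are powers of two, ring-index arithmetic such as
 *	entry = np->cur_tx % TX_RING_SIZE;
 * compiles down to the equivalent mask
 *	entry = np->cur_tx & (TX_RING_SIZE - 1);
 * with no division instruction.
 */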

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.
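
As a sketch (not extra driver code), the chaining set up by init_ring()
below looks like:

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));

so the final descriptor's next pointer wraps to the first, closing the ring.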

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
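
In outline, rx_poll() below implements this decision as:

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		/* copy the frame into the small fresh skb */
	} else {
		/* unmap the full-sized ring skb and pass it upstream */
	}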

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
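
Concretely, each Rx buffer is prepared with

	skb_reserve(skb, 2);	/* 14-byte header + 2 puts IP at offset 16 */

before its address is handed to the chip.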

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
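
(In the code below, the tbusy/tx_full roles described above are played by
netif_stop_queue()/netif_wake_queue(), driven from start_tx() and the
interrupt handler via the cur_tx/dirty_tx counters.)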
188
189IV. Notes
190
191IVb. References
192
193The Sundance ST201 datasheet, preliminary version.
Philippe De Muyterb71b95e2005-10-28 12:23:47 +0200194The Kendin KS8723 datasheet, preliminary version.
195The ICplus IP100 datasheet, preliminary version.
196http://www.scyld.com/expert/100mbps.html
197http://www.scyld.com/expert/NWay.html
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198
199IVc. Errata
200
201*/
202
203/* Work-around for Kendin chip bugs. */
204#ifndef CONFIG_SUNDANCE_MMIO
205#define USE_IO_OPS 1
206#endif
207
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000208static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400209 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
214 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
215 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
216 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217};
218MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
219
220enum {
221 netdev_io_size = 128
222};
223
224struct pci_id_info {
225 const char *name;
226};
Jeff Garzik46009c82006-06-27 09:12:38 -0400227static const struct pci_id_info pci_id_tbl[] __devinitdata = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 {"D-Link DFE-550TX FAST Ethernet Adapter"},
229 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 {"D-Link DFE-580TX 4 port Server Adapter"},
231 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 {"D-Link DL10050-based FAST Ethernet Adapter"},
233 {"Sundance Technology Alta"},
Pedro Alejandro López-Valencia1668b192006-06-15 22:46:44 +0200234 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
Jeff Garzik46009c82006-06-27 09:12:38 -0400235 { } /* terminate list. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236};
237
238/* This driver was written to use PCI memory space, however x86-oriented
239 hardware often uses I/O space accesses. */
240
241/* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. The name can only partially document the semantics and make
245 the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
248*/
249enum alta_offsets {
250 DMACtrl = 0x00,
251 TxListPtr = 0x04,
252 TxDMABurstThresh = 0x08,
253 TxDMAUrgentThresh = 0x09,
254 TxDMAPollPeriod = 0x0a,
255 RxDMAStatus = 0x0c,
256 RxListPtr = 0x10,
257 DebugCtrl0 = 0x1a,
258 DebugCtrl1 = 0x1c,
259 RxDMABurstThresh = 0x14,
260 RxDMAUrgentThresh = 0x15,
261 RxDMAPollPeriod = 0x16,
262 LEDCtrl = 0x1a,
263 ASICCtrl = 0x30,
264 EEData = 0x34,
265 EECtrl = 0x36,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266 FlashAddr = 0x40,
267 FlashData = 0x44,
268 TxStatus = 0x46,
269 TxFrameId = 0x47,
270 DownCounter = 0x18,
271 IntrClear = 0x4a,
272 IntrEnable = 0x4c,
273 IntrStatus = 0x4e,
274 MACCtrl0 = 0x50,
275 MACCtrl1 = 0x52,
276 StationAddr = 0x54,
277 MaxFrameSize = 0x5A,
278 RxMode = 0x5c,
279 MIICtrl = 0x5e,
280 MulticastFilter0 = 0x60,
281 MulticastFilter1 = 0x64,
282 RxOctetsLow = 0x68,
283 RxOctetsHigh = 0x6a,
284 TxOctetsLow = 0x6c,
285 TxOctetsHigh = 0x6e,
286 TxFramesOK = 0x70,
287 RxFramesOK = 0x72,
288 StatsCarrierError = 0x74,
289 StatsLateColl = 0x75,
290 StatsMultiColl = 0x76,
291 StatsOneColl = 0x77,
292 StatsTxDefer = 0x78,
293 RxMissed = 0x79,
294 StatsTxXSDefer = 0x7a,
295 StatsTxAbort = 0x7b,
296 StatsBcastTx = 0x7c,
297 StatsBcastRx = 0x7d,
298 StatsMcastTx = 0x7e,
299 StatsMcastRx = 0x7f,
300 /* Aliased and bogus values! */
301 RxStatus = 0x0c,
302};
303enum ASICCtrl_HiWord_bit {
304 GlobalReset = 0x0001,
305 RxReset = 0x0002,
306 TxReset = 0x0004,
307 DMAReset = 0x0008,
308 FIFOReset = 0x0010,
309 NetworkReset = 0x0020,
310 HostReset = 0x0040,
311 ResetBusy = 0x0400,
312};
313
314/* Bits in the interrupt status/mask registers. */
315enum intr_status_bits {
316 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
318 IntrDrvRqst=0x0040,
319 StatsMax=0x0080, LinkChange=0x0100,
320 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
321};
322
323/* Bits in the RxMode register. */
324enum rx_mode_bits {
325 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
327};
328/* Bits in MACCtrl. */
329enum mac_ctrl0_bits {
330 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
332};
333enum mac_ctrl1_bits {
334 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
335 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
337};
338
339/* The Rx and Tx buffer descriptors. */
340/* Note that using only 32 bit fields simplifies conversion to big-endian
341 architectures. */
342struct netdev_desc {
Al Viro14c9d9b2007-12-09 16:50:47 +0000343 __le32 next_desc;
344 __le32 status;
345 struct desc_frag { __le32 addr, length; } frag[1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346};
347
348/* Bits in netdev_desc.status */
349enum desc_status_bits {
350 DescOwn=0x8000,
351 DescEndPacket=0x4000,
352 DescEndRing=0x2000,
353 LastFrag=0x80000000,
354 DescIntrOnTx=0x8000,
355 DescIntrOnDMADone=0x80000000,
356 DisableAlign = 0x00000001,
357};
358
359#define PRIV_ALIGN 15 /* Required alignment mask */
360/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
361 within the structure. */
362#define MII_CNT 4
363struct netdev_private {
364 /* Descriptor rings first for alignment. */
365 struct netdev_desc *rx_ring;
366 struct netdev_desc *tx_ring;
367 struct sk_buff* rx_skbuff[RX_RING_SIZE];
368 struct sk_buff* tx_skbuff[TX_RING_SIZE];
369 dma_addr_t tx_ring_dma;
370 dma_addr_t rx_ring_dma;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 struct timer_list timer; /* Media monitoring timer. */
372 /* Frequently used values: keep some adjacent for cache effect. */
373 spinlock_t lock;
374 spinlock_t rx_lock; /* Group with Tx control cache line. */
375 int msg_enable;
376 int chip_id;
377 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
378 unsigned int rx_buf_sz; /* Based on MTU+slack. */
379 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
380 unsigned int cur_tx, dirty_tx;
381 /* These values are keep track of the transceiver/media in use. */
382 unsigned int flowctrl:1;
383 unsigned int default_port:4; /* Last dev->if_port value. */
384 unsigned int an_enable:1;
385 unsigned int speed;
386 struct tasklet_struct rx_tasklet;
387 struct tasklet_struct tx_tasklet;
388 int budget;
389 int cur_task;
390 /* Multicast and receive mode. */
391 spinlock_t mcastlock; /* SMP lock multicast updates. */
392 u16 mcast_filter[4];
393 /* MII transceiver section. */
394 struct mii_if_info mii_if;
395 int mii_preamble_required;
396 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
397 struct pci_dev *pci_dev;
398 void __iomem *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399};
400
401/* The station address location in the EEPROM. */
402#define EEPROM_SA_OFFSET 0x10
403#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
404 IntrDrvRqst | IntrTxDone | StatsMax | \
405 LinkChange)
406
407static int change_mtu(struct net_device *dev, int new_mtu);
408static int eeprom_read(void __iomem *ioaddr, int location);
409static int mdio_read(struct net_device *dev, int phy_id, int location);
410static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
Dan Nicholson50500152008-08-20 16:51:59 -0700411static int mdio_wait_link(struct net_device *dev, int wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412static int netdev_open(struct net_device *dev);
413static void check_duplex(struct net_device *dev);
414static void netdev_timer(unsigned long data);
415static void tx_timeout(struct net_device *dev);
416static void init_ring(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000417static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418static int reset_tx (struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100419static irqreturn_t intr_handler(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420static void rx_poll(unsigned long data);
421static void tx_poll(unsigned long data);
422static void refill_rx (struct net_device *dev);
423static void netdev_error(struct net_device *dev, int intr_status);
424static void netdev_error(struct net_device *dev, int intr_status);
425static void set_rx_mode(struct net_device *dev);
426static int __set_mac_addr(struct net_device *dev);
427static struct net_device_stats *get_stats(struct net_device *dev);
428static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
429static int netdev_close(struct net_device *dev);
Jeff Garzik7282d492006-09-13 14:30:00 -0400430static const struct ethtool_ops ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431
Philippe De Muyterb71b95e2005-10-28 12:23:47 +0200432static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
433{
434 struct netdev_private *np = netdev_priv(dev);
435 void __iomem *ioaddr = np->base + ASICCtrl;
436 int countdown;
437
438 /* ST201 documentation states ASICCtrl is a 32bit register */
439 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
440 /* ST201 documentation states reset can take up to 1 ms */
441 countdown = 10 + 1;
442 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
443 if (--countdown == 0) {
444 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
445 break;
446 }
447 udelay(100);
448 }
449}
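
/* Callers hand sundance_reset() the ASICCtrl high-word reset bits already
 * shifted into place; e.g. the Tx recovery paths below use
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | TxReset) << 16);
 */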

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
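
/* For example, sundance_probe1() above reads the station address as three
 * little-endian words starting at EEPROM_SA_OFFSET:
 *
 *	((__le16 *)dev->dev_addr)[i] =
 *		cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 */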

/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back 33Mhz PCI cycles. */
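
/* For reference: the 16-bit read command built in mdio_read() below,
 * (0xf6 << 10) | (phy_id << 5) | location, shifts out as two leading idle
 * ones, the 01 start pattern and the 10 read opcode, then the 5-bit PHY
 * and register addresses, before the bus is turned around to clock in the
 * 16 data bits.
 */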
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx thread work */
David Howells7d12e782006-10-05 14:55:46 +01001138static irqreturn_t intr_handler(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139{
1140 struct net_device *dev = (struct net_device *)dev_instance;
1141 struct netdev_private *np = netdev_priv(dev);
1142 void __iomem *ioaddr = np->base;
1143 int hw_frame_id;
1144 int tx_cnt;
1145 int tx_status;
1146 int handled = 0;
Jesse Huange2420402006-10-20 14:42:05 -07001147 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148
1149
1150 do {
1151 int intr_status = ioread16(ioaddr + IntrStatus);
1152 iowrite16(intr_status, ioaddr + IntrStatus);
1153
1154 if (netif_msg_intr(np))
1155 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1156 dev->name, intr_status);
1157
1158 if (!(intr_status & DEFAULT_INTR))
1159 break;
1160
1161 handled = 1;
1162
1163 if (intr_status & (IntrRxDMADone)) {
1164 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1165 ioaddr + IntrEnable);
1166 if (np->budget < 0)
1167 np->budget = RX_BUDGET;
1168 tasklet_schedule(&np->rx_tasklet);
1169 }
1170 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1171 tx_status = ioread16 (ioaddr + TxStatus);
1172 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1173 if (netif_msg_tx_done(np))
1174 printk
1175 ("%s: Transmit status is %2.2x.\n",
1176 dev->name, tx_status);
1177 if (tx_status & 0x1e) {
Philippe De Muyterb71b95e2005-10-28 12:23:47 +02001178 if (netif_msg_tx_err(np))
1179 printk("%s: Transmit error status %4.4x.\n",
1180 dev->name, tx_status);
Eric Dumazet553e2332009-05-27 10:34:50 +00001181 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 if (tx_status & 0x10)
Eric Dumazet553e2332009-05-27 10:34:50 +00001183 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 if (tx_status & 0x08)
Eric Dumazet553e2332009-05-27 10:34:50 +00001185 dev->stats.collisions++;
Philippe De Muyterb71b95e2005-10-28 12:23:47 +02001186 if (tx_status & 0x04)
Eric Dumazet553e2332009-05-27 10:34:50 +00001187 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 if (tx_status & 0x02)
Eric Dumazet553e2332009-05-27 10:34:50 +00001189 dev->stats.tx_window_errors++;
Jesse Huange2420402006-10-20 14:42:05 -07001190
Philippe De Muyterb71b95e2005-10-28 12:23:47 +02001191 /*
1192 ** This reset has been verified on
1193 ** DFE-580TX boards ! phdm@macqel.be.
1194 */
1195 if (tx_status & 0x10) { /* TxUnderrun */
Philippe De Muyterb71b95e2005-10-28 12:23:47 +02001196 /* Restart Tx FIFO and transmitter */
1197 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
Philippe De Muyterb71b95e2005-10-28 12:23:47 +02001198 /* No need to reset the Tx pointer here */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 }
Jesse Huang2109f892006-10-20 14:42:11 -07001200 /* Restart the Tx. Need to make sure tx enabled */
1201 i = 10;
1202 do {
1203 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1204 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1205 break;
1206 mdelay(1);
1207 } while (--i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 }
1209 /* Yup, this is a documentation bug. It cost me *hours*. */
1210 iowrite16 (0, ioaddr + TxStatus);
1211 if (tx_cnt < 0) {
1212 iowrite32(5000, ioaddr + DownCounter);
1213 break;
1214 }
1215 tx_status = ioread16 (ioaddr + TxStatus);
1216 }
1217 hw_frame_id = (tx_status >> 8) & 0xff;
1218 } else {
1219 hw_frame_id = ioread8(ioaddr + TxFrameId);
1220 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001221
Auke Kok44c10132007-06-08 15:46:36 -07001222 if (np->pci_dev->revision >= 0x14) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 spin_lock(&np->lock);
1224 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1225 int entry = np->dirty_tx % TX_RING_SIZE;
1226 struct sk_buff *skb;
1227 int sw_frame_id;
1228 sw_frame_id = (le32_to_cpu(
1229 np->tx_ring[entry].status) >> 2) & 0xff;
1230 if (sw_frame_id == hw_frame_id &&
1231 !(le32_to_cpu(np->tx_ring[entry].status)
1232 & 0x00010000))
1233 break;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001234 if (sw_frame_id == (hw_frame_id + 1) %
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 TX_RING_SIZE)
1236 break;
1237 skb = np->tx_skbuff[entry];
1238 /* Free the original skb. */
1239 pci_unmap_single(np->pci_dev,
Al Viro14c9d9b2007-12-09 16:50:47 +00001240 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 skb->len, PCI_DMA_TODEVICE);
1242 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1243 np->tx_skbuff[entry] = NULL;
1244 np->tx_ring[entry].frag[0].addr = 0;
1245 np->tx_ring[entry].frag[0].length = 0;
1246 }
1247 spin_unlock(&np->lock);
1248 } else {
1249 spin_lock(&np->lock);
1250 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1251 int entry = np->dirty_tx % TX_RING_SIZE;
1252 struct sk_buff *skb;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001253 if (!(le32_to_cpu(np->tx_ring[entry].status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 & 0x00010000))
1255 break;
1256 skb = np->tx_skbuff[entry];
1257 /* Free the original skb. */
1258 pci_unmap_single(np->pci_dev,
Al Viro14c9d9b2007-12-09 16:50:47 +00001259 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 skb->len, PCI_DMA_TODEVICE);
1261 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1262 np->tx_skbuff[entry] = NULL;
1263 np->tx_ring[entry].frag[0].addr = 0;
1264 np->tx_ring[entry].frag[0].length = 0;
1265 }
1266 spin_unlock(&np->lock);
1267 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001268
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 if (netif_queue_stopped(dev) &&
1270 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1271 /* The ring is no longer full, clear busy flag. */
1272 netif_wake_queue (dev);
1273 }
1274 /* Abnormal error summary/uncommon events handlers. */
1275 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1276 netdev_error(dev, intr_status);
1277 } while (0);
1278 if (netif_msg_intr(np))
1279 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1280 dev->name, ioread16(ioaddr + IntrStatus));
1281 return IRQ_RETVAL(handled);
1282}
1283
1284static void rx_poll(unsigned long data)
1285{
1286 struct net_device *dev = (struct net_device *)data;
1287 struct netdev_private *np = netdev_priv(dev);
1288 int entry = np->cur_rx % RX_RING_SIZE;
1289 int boguscnt = np->budget;
1290 void __iomem *ioaddr = np->base;
1291 int received = 0;
1292
1293 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1294 while (1) {
1295 struct netdev_desc *desc = &(np->rx_ring[entry]);
1296 u32 frame_status = le32_to_cpu(desc->status);
1297 int pkt_len;
1298
1299 if (--boguscnt < 0) {
1300 goto not_done;
1301 }
1302 if (!(frame_status & DescOwn))
1303 break;
1304 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1305 if (netif_msg_rx_status(np))
1306 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1307 frame_status);
1308 if (frame_status & 0x001f4000) {
1309 /* There was a error. */
1310 if (netif_msg_rx_err(np))
1311 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1312 frame_status);
Eric Dumazet553e2332009-05-27 10:34:50 +00001313 dev->stats.rx_errors++;
1314 if (frame_status & 0x00100000)
1315 dev->stats.rx_length_errors++;
1316 if (frame_status & 0x00010000)
1317 dev->stats.rx_fifo_errors++;
1318 if (frame_status & 0x00060000)
1319 dev->stats.rx_frame_errors++;
1320 if (frame_status & 0x00080000)
1321 dev->stats.rx_crc_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 if (frame_status & 0x00100000) {
1323 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1324 " status %8.8x.\n",
1325 dev->name, frame_status);
1326 }
1327 } else {
1328 struct sk_buff *skb;
1329#ifndef final_version
1330 if (netif_msg_rx_status(np))
1331 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1332 ", bogus_cnt %d.\n",
1333 pkt_len, boguscnt);
1334#endif
1335 /* Check if the packet is long enough to accept without copying
1336 to a minimally-sized skbuff. */
Joe Perches8e95a202009-12-03 07:58:21 +00001337 if (pkt_len < rx_copybreak &&
1338 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 skb_reserve(skb, 2); /* 16 byte align the IP header */
1340 pci_dma_sync_single_for_cpu(np->pci_dev,
Al Viro14c9d9b2007-12-09 16:50:47 +00001341 le32_to_cpu(desc->frag[0].addr),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 np->rx_buf_sz,
1343 PCI_DMA_FROMDEVICE);
1344
David S. Miller8c7b7fa2007-07-10 22:08:12 -07001345 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 pci_dma_sync_single_for_device(np->pci_dev,
Al Viro14c9d9b2007-12-09 16:50:47 +00001347 le32_to_cpu(desc->frag[0].addr),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 np->rx_buf_sz,
1349 PCI_DMA_FROMDEVICE);
1350 skb_put(skb, pkt_len);
1351 } else {
1352 pci_unmap_single(np->pci_dev,
Al Viro14c9d9b2007-12-09 16:50:47 +00001353 le32_to_cpu(desc->frag[0].addr),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 np->rx_buf_sz,
1355 PCI_DMA_FROMDEVICE);
1356 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1357 np->rx_skbuff[entry] = NULL;
1358 }
1359 skb->protocol = eth_type_trans(skb, dev);
1360 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1361 netif_rx(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 }
1363 entry = (entry + 1) % RX_RING_SIZE;
1364 received++;
1365 }
1366 np->cur_rx = entry;
1367 refill_rx (dev);
1368 np->budget -= received;
1369 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1370 return;
1371
1372not_done:
1373 np->cur_rx = entry;
1374 refill_rx (dev);
1375 if (!received)
1376 received = 1;
1377 np->budget -= received;
1378 if (np->budget <= 0)
1379 np->budget = RX_BUDGET;
1380 tasklet_schedule(&np->rx_tasklet);
1381 return;
1382}
1383
1384static void refill_rx (struct net_device *dev)
1385{
1386 struct netdev_private *np = netdev_priv(dev);
1387 int entry;
1388 int cnt = 0;
1389
1390 /* Refill the Rx ring buffers. */
1391 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1392 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1393 struct sk_buff *skb;
1394 entry = np->dirty_rx % RX_RING_SIZE;
1395 if (np->rx_skbuff[entry] == NULL) {
1396 skb = dev_alloc_skb(np->rx_buf_sz);
1397 np->rx_skbuff[entry] = skb;
1398 if (skb == NULL)
1399 break; /* Better luck next round. */
1400 skb->dev = dev; /* Mark as being used by this device. */
1401 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1402 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
David S. Miller689be432005-06-28 15:25:31 -07001403 pci_map_single(np->pci_dev, skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1405 }
1406 /* Perhaps we need not reset this field. */
1407 np->rx_ring[entry].frag[0].length =
1408 cpu_to_le32(np->rx_buf_sz | LastFrag);
1409 np->rx_ring[entry].status = 0;
1410 cnt++;
1411 }
1412 return;
1413}
1414static void netdev_error(struct net_device *dev, int intr_status)
1415{
1416 struct netdev_private *np = netdev_priv(dev);
1417 void __iomem *ioaddr = np->base;
1418 u16 mii_ctl, mii_advertise, mii_lpa;
1419 int speed;
1420
1421 if (intr_status & LinkChange) {
Dan Nicholson50500152008-08-20 16:51:59 -07001422 if (mdio_wait_link(dev, 10) == 0) {
1423 printk(KERN_INFO "%s: Link up\n", dev->name);
1424 if (np->an_enable) {
1425 mii_advertise = mdio_read(dev, np->phys[0],
1426 MII_ADVERTISE);
1427 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1428 mii_advertise &= mii_lpa;
1429 printk(KERN_INFO "%s: Link changed: ",
1430 dev->name);
1431 if (mii_advertise & ADVERTISE_100FULL) {
1432 np->speed = 100;
1433 printk("100Mbps, full duplex\n");
1434 } else if (mii_advertise & ADVERTISE_100HALF) {
1435 np->speed = 100;
1436 printk("100Mbps, half duplex\n");
1437 } else if (mii_advertise & ADVERTISE_10FULL) {
1438 np->speed = 10;
1439 printk("10Mbps, full duplex\n");
1440 } else if (mii_advertise & ADVERTISE_10HALF) {
1441 np->speed = 10;
1442 printk("10Mbps, half duplex\n");
1443 } else
1444 printk("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
Dan Nicholson50500152008-08-20 16:51:59 -07001446 } else {
1447 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1448 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1449 np->speed = speed;
1450 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1451 dev->name, speed);
1452 printk("%s duplex.\n",
1453 (mii_ctl & BMCR_FULLDPLX) ?
1454 "full" : "half");
1455 }
1456 check_duplex(dev);
1457 if (np->flowctrl && np->mii_if.full_duplex) {
1458 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1459 ioaddr + MulticastFilter1+2);
1460 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1461 ioaddr + MACCtrl0);
1462 }
1463 netif_carrier_on(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 } else {
Dan Nicholson50500152008-08-20 16:51:59 -07001465 printk(KERN_INFO "%s: Link down\n", dev->name);
1466 netif_carrier_off(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 }
1468 }
1469 if (intr_status & StatsMax) {
1470 get_stats(dev);
1471 }
1472 if (intr_status & IntrPCIErr) {
1473 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1474 dev->name, intr_status);
1475 /* We must do a global reset of DMA to continue. */
1476 }
1477}
1478
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
	dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
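	/* The remaining counters are read but not folded into dev->stats,
	   presumably so that these clear-on-read registers do not saturate
	   and keep raising StatsMax. */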
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &dev->stats;
}

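/*
 * Program the receive filter.  Four regimes, from most to least
 * permissive: promiscuous (accept everything); accept-all-multicast
 * (hash filter wide open, used when the list exceeds
 * multicast_filter_limit or IFF_ALLMULTI is set); a 64-bin multicast
 * hash built from the top six bits of the little-endian CRC-32 of each
 * address; or, with no multicast addresses at all, broadcast plus our
 * station address only.
 */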
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000)
					index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
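	/* Bit 9 of the last filter word is the same bit netdev_error sets
	   via MulticastFilter1+2 when flow control comes up; presumably it
	   is the hash bin of the 802.3x PAUSE multicast address
	   01:80:C2:00:00:01, which must pass the filter on full-duplex
	   links for flow control to work. */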
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

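/*
 * A minimal sketch of the hash computation used in set_rx_mode() above,
 * pulled out for illustration only (sundance_mc_hash() is hypothetical
 * and not compiled into the driver):
 */
#if 0
static int sundance_mc_hash(const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);
	int index = 0, bit;

	/* Bin index = top six bits of the little-endian CRC, MSB first. */
	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	return index;	/* 0..63: bit (index % 16) of mc_filter[index / 16] */
}
#endif

/*
 * __set_mac_addr writes the station address as three little-endian
 * 16-bit words.  For example, the (hypothetical) address
 * 00:0A:0B:0C:0D:0E is written as 0x0A00, 0x0C0B and 0x0E0D at
 * StationAddr+0, +2 and +4.
 */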
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

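/*
 * These hooks back the ethtool interface; for instance a command such
 * as "ethtool -s eth0 speed 100 duplex full autoneg off" (interface
 * name hypothetical) lands in set_settings() above, which hands the
 * request to the generic MII layer under the driver lock.
 *
 * Similarly, netdev_ioctl() below delegates the SIOCGMIIPHY,
 * SIOCGMIIREG and SIOCSMIIREG requests (used by tools like mii-tool)
 * to generic_mii_ioctl().
 */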
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

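/*
 * Bring the interface down: kill the deferred-work tasklets first so
 * nothing touches the rings, quiesce and reset the chip, then release
 * the IRQ, the timer, and every outstanding DMA-mapped skb.
 */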
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for and kill the Rx and Tx tasklets */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			"Rx %4.4x Int %2.2x.\n",
			dev->name, ioread8(ioaddr + TxStatus),
			ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so resources can be released safely */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

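	/*
	 * Issue a global reset through the upper half of ASICCtrl, then
	 * poll ResetBusy for up to ~2 seconds (2000 x 1 ms) so the
	 * hardware is quiescent before the rings are torn down.
	 */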
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			(int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			(int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

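/*
 * PCI removal: undo probe in reverse order, i.e. unregister the netdev,
 * free both descriptor rings, unmap the register BAR, release the PCI
 * regions, and finally free the net_device itself.
 */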
static void __devexit sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);