1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19
20
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
23
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
27
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
31
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
35
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42	- Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
44
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
51
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
56
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
64
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
68
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
71
72	Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
74
75 Version LK1.07 (D-Link):
76	- Fix tx bugs on big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79	- Remove redundant get_stats() call in intr_handler(); those
80	  I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
82
83 Version LK1.08 (D-Link):
84	- Fix custom MAC address bug
85	  (StationAddr register only accepts word writes)
86
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
90
91 Version LK1.09a (ICPlus):
92	- Add a delay when reading the contents of the EEPROM
93
94*/
95
96#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a"
98#define DRV_RELDATE "10-Jul-2003"
99
100
101/* The user-configurable values.
102 These may be modified when a driver module is loaded.*/
103static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
104/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
105 Typical is a 64 element hash table based on the Ethernet CRC. */
106static int multicast_filter_limit = 32;
107
108/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
109 Setting to > 1518 effectively disables this feature.
110 This chip can receive into offset buffers, so the Alpha does not
111 need a copy-align. */
112static int rx_copybreak;
113static int flowctrl=1;
114
115/* media[] specifies the media type the NIC operates at.
116 autosense Autosensing active media.
117 10mbps_hd 10Mbps half duplex.
118 10mbps_fd 10Mbps full duplex.
119 100mbps_hd 100Mbps half duplex.
120 100mbps_fd 100Mbps full duplex.
121 0 Autosensing active media.
122 1 10Mbps half duplex.
123 2 10Mbps full duplex.
124 3 100Mbps half duplex.
125 4 100Mbps full duplex.
126*/
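/* Example (illustrative values only; adjust to your own setup):
     modprobe sundance media=100mbps_fd,autosense debug=3
   forces 100Mbps full duplex on the first board, autosenses the second,
   and raises the driver message level; add flowctrl=0 to disable
   flow-control advertisement. */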
127#define MAX_UNITS 8
128static char *media[MAX_UNITS];
129
130
131/* Operational parameters that are set at compile time. */
132
133/* Keep the ring sizes a power of two for compile efficiency.
134 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
135 Making the Tx ring too large decreases the effectiveness of channel
136 bonding and packet priority, and more than 128 requires modifying the
137 Tx error recovery.
138 Large receive rings merely waste memory. */
139#define TX_RING_SIZE 32
140#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
141#define RX_RING_SIZE 64
142#define RX_BUDGET 32
143#define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct netdev_desc))
144#define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct netdev_desc))
145
146/* Operational parameters that usually are not changed. */
147/* Time in jiffies before concluding the transmitter is hung. */
148#define TX_TIMEOUT (4*HZ)
149#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
150
151/* Include files, designed to support most kernel versions 2.0.0 and later. */
152#include <linux/module.h>
153#include <linux/kernel.h>
154#include <linux/string.h>
155#include <linux/timer.h>
156#include <linux/errno.h>
157#include <linux/ioport.h>
158#include <linux/slab.h>
159#include <linux/interrupt.h>
160#include <linux/pci.h>
161#include <linux/netdevice.h>
162#include <linux/etherdevice.h>
163#include <linux/skbuff.h>
164#include <linux/init.h>
165#include <linux/bitops.h>
166#include <asm/uaccess.h>
167#include <asm/processor.h> /* Processor type for cache alignment. */
168#include <asm/io.h>
169#include <linux/delay.h>
170#include <linux/spinlock.h>
171#ifndef _COMPAT_WITH_OLD_KERNEL
172#include <linux/crc32.h>
173#include <linux/ethtool.h>
174#include <linux/mii.h>
175#else
176#include "crc32.h"
177#include "ethtool.h"
178#include "mii.h"
179#include "compat.h"
180#endif
181
182/* These identify the driver base version and may not be removed. */
183static char version[] __devinitdata =
184KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
185KERN_INFO " http://www.scyld.com/network/sundance.html\n";
186
187MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
188MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
189MODULE_LICENSE("GPL");
190
191module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0);
193module_param_array(media, charp, NULL, 0);
194module_param(flowctrl, int, 0);
195MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
196MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
197MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
198
199/*
200 Theory of Operation
201
202I. Board Compatibility
203
204This driver is designed for the Sundance Technologies "Alta" ST201 chip.
205
206II. Board-specific settings
207
208III. Driver operation
209
210IIIa. Ring buffers
211
212This driver uses two statically allocated fixed-size descriptor lists
213formed into rings by a branch from the final descriptor to the beginning of
214the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
215Some chips explicitly use only 2^N sized rings, while others use a
216'next descriptor' pointer that the driver forms into rings.
217
218IIIb/c. Transmit/Receive Structure
219
220This driver uses a zero-copy receive and transmit scheme.
221The driver allocates full frame size skbuffs for the Rx ring buffers at
222open() time and passes the skb->data field to the chip as receive data
223buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
224a fresh skbuff is allocated and the frame is copied to the new skbuff.
225When the incoming frame is larger, the skbuff is passed directly up the
226protocol stack. Buffers consumed this way are replaced by newly allocated
227skbuffs in a later phase of receives.
228
229The RX_COPYBREAK value is chosen to trade off the memory wasted by
230using a full-sized skbuff for small frames vs. the copying costs of larger
231frames. New boards are typically used in generously configured machines
232and the underfilled buffers have negligible impact compared to the benefit of
233a single allocation size, so the default value of zero results in never
234copying packets. When copying is done, the cost is usually mitigated by using
235a combined copy/checksum routine. Copying also preloads the cache, which is
236most useful with small frames.
237
238A subtle aspect of the operation is that the IP header at offset 14 in an
239ethernet frame isn't longword aligned for further processing.
240Unaligned buffers are permitted by the Sundance hardware, so
241frames are received into the skbuff at an offset of "+2", 16-byte aligning
242the IP header.
243
244IIId. Synchronization
245
246The driver runs as two independent, single-threaded flows of control. One
247is the send-packet routine, which enforces single-threaded use by the
248dev->tbusy flag. The other thread is the interrupt handler, which is single
249threaded by the hardware and interrupt handling software.
250
251The send packet thread has partial control over the Tx ring and 'dev->tbusy'
252flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
253queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
254the 'lp->tx_full' flag.
255
256The interrupt handler has exclusive control over the Rx ring and records stats
257from the Tx ring. After reaping the stats, it marks the Tx queue entry as
258empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
259clears both the tx_full and tbusy flags.
260
261IV. Notes
262
263IVb. References
264
265The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
268
269IVc. Errata
270
271*/
272
273/* Work-around for Kendin chip bugs. */
274#ifndef CONFIG_SUNDANCE_MMIO
275#define USE_IO_OPS 1
276#endif
277
278static struct pci_device_id sundance_pci_tbl[] = {
279 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
280 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
281 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
282 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
283 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
284 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
285 {0,}
286};
287MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
288
289enum {
290 netdev_io_size = 128
291};
292
293struct pci_id_info {
294 const char *name;
295};
296static struct pci_id_info pci_id_tbl[] = {
297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
299 {"D-Link DFE-580TX 4 port Server Adapter"},
300 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
301 {"D-Link DL10050-based FAST Ethernet Adapter"},
302 {"Sundance Technology Alta"},
303 {NULL,}, /* 0 terminated list. */
304};
305
306/* This driver was written to use PCI memory space; however, x86-oriented
307 hardware often uses I/O space accesses. */
308
309/* Offsets to the device registers.
310 Unlike software-only systems, device drivers interact with complex hardware.
311 It's not useful to define symbolic names for every register bit in the
312   device. A name can only partially document the semantics, and it makes
313   the driver longer and more difficult to read.
314 In general, only the important configuration values or bits changed
315 multiple times should be defined symbolically.
316*/
317enum alta_offsets {
318 DMACtrl = 0x00,
319 TxListPtr = 0x04,
320 TxDMABurstThresh = 0x08,
321 TxDMAUrgentThresh = 0x09,
322 TxDMAPollPeriod = 0x0a,
323 RxDMAStatus = 0x0c,
324 RxListPtr = 0x10,
325 DebugCtrl0 = 0x1a,
326 DebugCtrl1 = 0x1c,
327 RxDMABurstThresh = 0x14,
328 RxDMAUrgentThresh = 0x15,
329 RxDMAPollPeriod = 0x16,
330 LEDCtrl = 0x1a,
331 ASICCtrl = 0x30,
332 EEData = 0x34,
333 EECtrl = 0x36,
334 TxStartThresh = 0x3c,
335 RxEarlyThresh = 0x3e,
336 FlashAddr = 0x40,
337 FlashData = 0x44,
338 TxStatus = 0x46,
339 TxFrameId = 0x47,
340 DownCounter = 0x18,
341 IntrClear = 0x4a,
342 IntrEnable = 0x4c,
343 IntrStatus = 0x4e,
344 MACCtrl0 = 0x50,
345 MACCtrl1 = 0x52,
346 StationAddr = 0x54,
347 MaxFrameSize = 0x5A,
348 RxMode = 0x5c,
349 MIICtrl = 0x5e,
350 MulticastFilter0 = 0x60,
351 MulticastFilter1 = 0x64,
352 RxOctetsLow = 0x68,
353 RxOctetsHigh = 0x6a,
354 TxOctetsLow = 0x6c,
355 TxOctetsHigh = 0x6e,
356 TxFramesOK = 0x70,
357 RxFramesOK = 0x72,
358 StatsCarrierError = 0x74,
359 StatsLateColl = 0x75,
360 StatsMultiColl = 0x76,
361 StatsOneColl = 0x77,
362 StatsTxDefer = 0x78,
363 RxMissed = 0x79,
364 StatsTxXSDefer = 0x7a,
365 StatsTxAbort = 0x7b,
366 StatsBcastTx = 0x7c,
367 StatsBcastRx = 0x7d,
368 StatsMcastTx = 0x7e,
369 StatsMcastRx = 0x7f,
370 /* Aliased and bogus values! */
371 RxStatus = 0x0c,
372};
373enum ASICCtrl_HiWord_bit {
374 GlobalReset = 0x0001,
375 RxReset = 0x0002,
376 TxReset = 0x0004,
377 DMAReset = 0x0008,
378 FIFOReset = 0x0010,
379 NetworkReset = 0x0020,
380 HostReset = 0x0040,
381 ResetBusy = 0x0400,
382};
383
384/* Bits in the interrupt status/mask registers. */
385enum intr_status_bits {
386 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
387 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
388 IntrDrvRqst=0x0040,
389 StatsMax=0x0080, LinkChange=0x0100,
390 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
391};
392
393/* Bits in the RxMode register. */
394enum rx_mode_bits {
395 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
396 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
397};
398/* Bits in MACCtrl. */
399enum mac_ctrl0_bits {
400 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
401 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
402};
403enum mac_ctrl1_bits {
404 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
405 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
406 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
407};
408
409/* The Rx and Tx buffer descriptors. */
410/* Note that using only 32 bit fields simplifies conversion to big-endian
411 architectures. */
412struct netdev_desc {
413 u32 next_desc;
414 u32 status;
415 struct desc_frag { u32 addr, length; } frag[1];
416};
417
418/* Bits in netdev_desc.status */
419enum desc_status_bits {
420 DescOwn=0x8000,
421 DescEndPacket=0x4000,
422 DescEndRing=0x2000,
423 LastFrag=0x80000000,
424 DescIntrOnTx=0x8000,
425 DescIntrOnDMADone=0x80000000,
426 DisableAlign = 0x00000001,
427};
428
429#define PRIV_ALIGN 15 /* Required alignment mask */
430/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
431 within the structure. */
432#define MII_CNT 4
433struct netdev_private {
434 /* Descriptor rings first for alignment. */
435 struct netdev_desc *rx_ring;
436 struct netdev_desc *tx_ring;
437 struct sk_buff* rx_skbuff[RX_RING_SIZE];
438 struct sk_buff* tx_skbuff[TX_RING_SIZE];
439 dma_addr_t tx_ring_dma;
440 dma_addr_t rx_ring_dma;
441 struct net_device_stats stats;
442 struct timer_list timer; /* Media monitoring timer. */
443 /* Frequently used values: keep some adjacent for cache effect. */
444 spinlock_t lock;
445 spinlock_t rx_lock; /* Group with Tx control cache line. */
446 int msg_enable;
447 int chip_id;
448 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
449 unsigned int rx_buf_sz; /* Based on MTU+slack. */
450 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
451 unsigned int cur_tx, dirty_tx;
452	/* These values keep track of the transceiver/media in use. */
453 unsigned int flowctrl:1;
454 unsigned int default_port:4; /* Last dev->if_port value. */
455 unsigned int an_enable:1;
456 unsigned int speed;
457 struct tasklet_struct rx_tasklet;
458 struct tasklet_struct tx_tasklet;
459 int budget;
460 int cur_task;
461 /* Multicast and receive mode. */
462 spinlock_t mcastlock; /* SMP lock multicast updates. */
463 u16 mcast_filter[4];
464 /* MII transceiver section. */
465 struct mii_if_info mii_if;
466 int mii_preamble_required;
467 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
468 struct pci_dev *pci_dev;
469 void __iomem *base;
470 unsigned char pci_rev_id;
471};
472
473/* The station address location in the EEPROM. */
474#define EEPROM_SA_OFFSET 0x10
475#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
476 IntrDrvRqst | IntrTxDone | StatsMax | \
477 LinkChange)
478
479static int change_mtu(struct net_device *dev, int new_mtu);
480static int eeprom_read(void __iomem *ioaddr, int location);
481static int mdio_read(struct net_device *dev, int phy_id, int location);
482static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
483static int netdev_open(struct net_device *dev);
484static void check_duplex(struct net_device *dev);
485static void netdev_timer(unsigned long data);
486static void tx_timeout(struct net_device *dev);
487static void init_ring(struct net_device *dev);
488static int start_tx(struct sk_buff *skb, struct net_device *dev);
489static int reset_tx (struct net_device *dev);
490static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
491static void rx_poll(unsigned long data);
492static void tx_poll(unsigned long data);
493static void refill_rx (struct net_device *dev);
494static void netdev_error(struct net_device *dev, int intr_status);
496static void set_rx_mode(struct net_device *dev);
497static int __set_mac_addr(struct net_device *dev);
498static struct net_device_stats *get_stats(struct net_device *dev);
499static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops;
502
503static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent)
505{
506 struct net_device *dev;
507 struct netdev_private *np;
508 static int card_idx;
509 int chip_idx = ent->driver_data;
510 int irq;
511 int i;
512 void __iomem *ioaddr;
513 u16 mii_ctl;
514 void *ring_space;
515 dma_addr_t ring_dma;
516#ifdef USE_IO_OPS
517 int bar = 0;
518#else
519 int bar = 1;
520#endif
521
522
523/* when built into the kernel, we only print version if device is found */
524#ifndef MODULE
525 static int printed_version;
526 if (!printed_version++)
527 printk(version);
528#endif
529
530 if (pci_enable_device(pdev))
531 return -EIO;
532 pci_set_master(pdev);
533
534 irq = pdev->irq;
535
536 dev = alloc_etherdev(sizeof(*np));
537 if (!dev)
538 return -ENOMEM;
539 SET_MODULE_OWNER(dev);
540 SET_NETDEV_DEV(dev, &pdev->dev);
541
542 if (pci_request_regions(pdev, DRV_NAME))
543 goto err_out_netdev;
544
545 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
546 if (!ioaddr)
547 goto err_out_res;
548
549 for (i = 0; i < 3; i++)
550 ((u16 *)dev->dev_addr)[i] =
551 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
552	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
553
554 dev->base_addr = (unsigned long)ioaddr;
555 dev->irq = irq;
556
557 np = netdev_priv(dev);
558 np->base = ioaddr;
559 np->pci_dev = pdev;
560 np->chip_id = chip_idx;
561 np->msg_enable = (1 << debug) - 1;
562 spin_lock_init(&np->lock);
563 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
564 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
565
566 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
567 if (!ring_space)
568 goto err_out_cleardev;
569 np->tx_ring = (struct netdev_desc *)ring_space;
570 np->tx_ring_dma = ring_dma;
571
572 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
573 if (!ring_space)
574 goto err_out_unmap_tx;
575 np->rx_ring = (struct netdev_desc *)ring_space;
576 np->rx_ring_dma = ring_dma;
577
578 np->mii_if.dev = dev;
579 np->mii_if.mdio_read = mdio_read;
580 np->mii_if.mdio_write = mdio_write;
581 np->mii_if.phy_id_mask = 0x1f;
582 np->mii_if.reg_num_mask = 0x1f;
583
584 /* The chip-specific entries in the device structure. */
585 dev->open = &netdev_open;
586 dev->hard_start_xmit = &start_tx;
587 dev->stop = &netdev_close;
588 dev->get_stats = &get_stats;
589 dev->set_multicast_list = &set_rx_mode;
590 dev->do_ioctl = &netdev_ioctl;
591 SET_ETHTOOL_OPS(dev, &ethtool_ops);
592 dev->tx_timeout = &tx_timeout;
593 dev->watchdog_timeo = TX_TIMEOUT;
594 dev->change_mtu = &change_mtu;
595 pci_set_drvdata(pdev, dev);
596
597 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
598
599 i = register_netdev(dev);
600 if (i)
601 goto err_out_unmap_rx;
602
603 printk(KERN_INFO "%s: %s at %p, ",
604 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
605 for (i = 0; i < 5; i++)
606 printk("%2.2x:", dev->dev_addr[i]);
607 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
608
609 if (1) {
610 int phy, phy_idx = 0;
611 np->phys[0] = 1; /* Default setting */
612 np->mii_preamble_required++;
613 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
614 int mii_status = mdio_read(dev, phy, MII_BMSR);
615 if (mii_status != 0xffff && mii_status != 0x0000) {
616 np->phys[phy_idx++] = phy;
617 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
618 if ((mii_status & 0x0040) == 0)
619 np->mii_preamble_required++;
620 printk(KERN_INFO "%s: MII PHY found at address %d, status "
621 "0x%4.4x advertising %4.4x.\n",
622 dev->name, phy, mii_status, np->mii_if.advertising);
623 }
624 }
625 np->mii_preamble_required--;
626
627 if (phy_idx == 0) {
628 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
629 dev->name, ioread32(ioaddr + ASICCtrl));
630 goto err_out_unregister;
631 }
632
633 np->mii_if.phy_id = np->phys[0];
634 }
635
636 /* Parse override configuration */
637 np->an_enable = 1;
638 if (card_idx < MAX_UNITS) {
639 if (media[card_idx] != NULL) {
640 np->an_enable = 0;
641 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
642 strcmp (media[card_idx], "4") == 0) {
643 np->speed = 100;
644 np->mii_if.full_duplex = 1;
645 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
646 || strcmp (media[card_idx], "3") == 0) {
647 np->speed = 100;
648 np->mii_if.full_duplex = 0;
649 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
650 strcmp (media[card_idx], "2") == 0) {
651 np->speed = 10;
652 np->mii_if.full_duplex = 1;
653 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
654 strcmp (media[card_idx], "1") == 0) {
655 np->speed = 10;
656 np->mii_if.full_duplex = 0;
657 } else {
658 np->an_enable = 1;
659 }
660 }
661 if (flowctrl == 1)
662 np->flowctrl = 1;
663 }
664
665 /* Fibre PHY? */
666 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
667 /* Default 100Mbps Full */
668 if (np->an_enable) {
669 np->speed = 100;
670 np->mii_if.full_duplex = 1;
671 np->an_enable = 0;
672 }
673 }
674 /* Reset PHY */
675 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
676 mdelay (300);
677 /* If flow control enabled, we need to advertise it.*/
678 if (np->flowctrl)
679 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
680 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
681 /* Force media type */
682 if (!np->an_enable) {
683 mii_ctl = 0;
684 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
685 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
686 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
687 printk (KERN_INFO "Override speed=%d, %s duplex\n",
688 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
689
690 }
691
692 /* Perhaps move the reset here? */
693 /* Reset the chip to erase previous misconfiguration. */
694 if (netif_msg_hw(np))
695 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
696 iowrite16(0x007f, ioaddr + ASICCtrl + 2);
697 if (netif_msg_hw(np))
698 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
699
700 card_idx++;
701 return 0;
702
703err_out_unregister:
704 unregister_netdev(dev);
705err_out_unmap_rx:
706 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
707err_out_unmap_tx:
708 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
709err_out_cleardev:
710 pci_set_drvdata(pdev, NULL);
711 pci_iounmap(pdev, ioaddr);
712err_out_res:
713 pci_release_regions(pdev);
714err_out_netdev:
715 free_netdev (dev);
716 return -ENODEV;
717}
718
719static int change_mtu(struct net_device *dev, int new_mtu)
720{
721 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
722 return -EINVAL;
723 if (netif_running(dev))
724 return -EBUSY;
725 dev->mtu = new_mtu;
726 return 0;
727}
728
729#define eeprom_delay(ee_addr) ioread32(ee_addr)
730/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
731static int __devinit eeprom_read(void __iomem *ioaddr, int location)
732{
733 int boguscnt = 10000; /* Typical 1900 ticks. */
734 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
735 do {
736 eeprom_delay(ioaddr + EECtrl);
737 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
738 return ioread16(ioaddr + EEData);
739 }
740 } while (--boguscnt > 0);
741 return 0;
742}
743
744/* MII transceiver control section.
745 Read and write the MII registers using software-generated serial
746 MDIO protocol. See the MII specifications or DP83840A data sheet
747 for details.
748
749   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
750   met by back-to-back 33 MHz PCI cycles. */
751#define mdio_delay() ioread8(mdio_addr)
752
753enum mii_reg_bits {
754 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
755};
756#define MDIO_EnbIn (0)
757#define MDIO_WRITE0 (MDIO_EnbOutput)
758#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
759
760/* Generate the preamble required for initial synchronization and
761 a few older transceivers. */
762static void mdio_sync(void __iomem *mdio_addr)
763{
764 int bits = 32;
765
766 /* Establish sync by sending at least 32 logic ones. */
767 while (--bits >= 0) {
768 iowrite8(MDIO_WRITE1, mdio_addr);
769 mdio_delay();
770 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
771 mdio_delay();
772 }
773}
774
775static int mdio_read(struct net_device *dev, int phy_id, int location)
776{
777 struct netdev_private *np = netdev_priv(dev);
778 void __iomem *mdio_addr = np->base + MIICtrl;
779 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
780 int i, retval = 0;
781
782 if (np->mii_preamble_required)
783 mdio_sync(mdio_addr);
784
785 /* Shift the read command bits out. */
786 for (i = 15; i >= 0; i--) {
787 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
788
789 iowrite8(dataval, mdio_addr);
790 mdio_delay();
791 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
792 mdio_delay();
793 }
794 /* Read the two transition, 16 data, and wire-idle bits. */
795 for (i = 19; i > 0; i--) {
796 iowrite8(MDIO_EnbIn, mdio_addr);
797 mdio_delay();
798 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
799 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
800 mdio_delay();
801 }
802 return (retval>>1) & 0xffff;
803}
804
805static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
806{
807 struct netdev_private *np = netdev_priv(dev);
808 void __iomem *mdio_addr = np->base + MIICtrl;
809 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
810 int i;
811
812 if (np->mii_preamble_required)
813 mdio_sync(mdio_addr);
814
815 /* Shift the command bits out. */
816 for (i = 31; i >= 0; i--) {
817 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
818
819 iowrite8(dataval, mdio_addr);
820 mdio_delay();
821 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
822 mdio_delay();
823 }
824 /* Clear out extra bits. */
825 for (i = 2; i > 0; i--) {
826 iowrite8(MDIO_EnbIn, mdio_addr);
827 mdio_delay();
828 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
829 mdio_delay();
830 }
831 return;
832}
833
834static int netdev_open(struct net_device *dev)
835{
836 struct netdev_private *np = netdev_priv(dev);
837 void __iomem *ioaddr = np->base;
838 int i;
839
840 /* Do we need to reset the chip??? */
841
842 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
843 if (i)
844 return i;
845
846 if (netif_msg_ifup(np))
847 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
848 dev->name, dev->irq);
849 init_ring(dev);
850
851 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
852 /* The Tx list pointer is written as packets are queued. */
853
854 /* Initialize other registers. */
855 __set_mac_addr(dev);
856#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
857 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
858#else
859 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
860#endif
861 if (dev->mtu > 2047)
862 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
863
864 /* Configure the PCI bus bursts and FIFO thresholds. */
865
866 if (dev->if_port == 0)
867 dev->if_port = np->default_port;
868
869 spin_lock_init(&np->mcastlock);
870
871 set_rx_mode(dev);
872 iowrite16(0, ioaddr + IntrEnable);
873 iowrite16(0, ioaddr + DownCounter);
874 /* Set the chip to poll every N*320nsec. */
875 iowrite8(100, ioaddr + RxDMAPollPeriod);
876 iowrite8(127, ioaddr + TxDMAPollPeriod);
877 /* Fix DFE-580TX packet drop issue */
878 if (np->pci_rev_id >= 0x14)
879 iowrite8(0x01, ioaddr + DebugCtrl1);
880 netif_start_queue(dev);
881
882 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
883
884 if (netif_msg_ifup(np))
885 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
886 "MAC Control %x, %4.4x %4.4x.\n",
887 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
888 ioread32(ioaddr + MACCtrl0),
889 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
890
891 /* Set the timer to check for link beat. */
892 init_timer(&np->timer);
893 np->timer.expires = jiffies + 3*HZ;
894 np->timer.data = (unsigned long)dev;
895 np->timer.function = &netdev_timer; /* timer handler */
896 add_timer(&np->timer);
897
898 /* Enable interrupts by setting the interrupt mask. */
899 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
900
901 return 0;
902}
903
904static void check_duplex(struct net_device *dev)
905{
906 struct netdev_private *np = netdev_priv(dev);
907 void __iomem *ioaddr = np->base;
908 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
909 int negotiated = mii_lpa & np->mii_if.advertising;
910 int duplex;
911
912 /* Force media */
913 if (!np->an_enable || mii_lpa == 0xffff) {
914 if (np->mii_if.full_duplex)
915 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
916 ioaddr + MACCtrl0);
917 return;
918 }
919
920 /* Autonegotiation */
921 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
922 if (np->mii_if.full_duplex != duplex) {
923 np->mii_if.full_duplex = duplex;
924 if (netif_msg_link(np))
925 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
926 "negotiated capability %4.4x.\n", dev->name,
927 duplex ? "full" : "half", np->phys[0], negotiated);
928		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
929 }
930}
931
932static void netdev_timer(unsigned long data)
933{
934 struct net_device *dev = (struct net_device *)data;
935 struct netdev_private *np = netdev_priv(dev);
936 void __iomem *ioaddr = np->base;
937 int next_tick = 10*HZ;
938
939 if (netif_msg_timer(np)) {
940 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
941 "Tx %x Rx %x.\n",
942 dev->name, ioread16(ioaddr + IntrEnable),
943 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
944 }
945 check_duplex(dev);
946 np->timer.expires = jiffies + next_tick;
947 add_timer(&np->timer);
948}
949
950static void tx_timeout(struct net_device *dev)
951{
952 struct netdev_private *np = netdev_priv(dev);
953 void __iomem *ioaddr = np->base;
954 unsigned long flag;
955
956 netif_stop_queue(dev);
957 tasklet_disable(&np->tx_tasklet);
958 iowrite16(0, ioaddr + IntrEnable);
959 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
960 "TxFrameId %2.2x,"
961 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
962 ioread8(ioaddr + TxFrameId));
963
964 {
965 int i;
966 for (i=0; i<TX_RING_SIZE; i++) {
967 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
968 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
969 le32_to_cpu(np->tx_ring[i].next_desc),
970 le32_to_cpu(np->tx_ring[i].status),
971 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
972 le32_to_cpu(np->tx_ring[i].frag[0].addr),
973 le32_to_cpu(np->tx_ring[i].frag[0].length));
974 }
975 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
976 ioread32(np->base + TxListPtr),
977 netif_queue_stopped(dev));
978 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
979 np->cur_tx, np->cur_tx % TX_RING_SIZE,
980 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
981 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
982 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
983 }
984 spin_lock_irqsave(&np->lock, flag);
985
986 /* Stop and restart the chip's Tx processes . */
987 reset_tx(dev);
988 spin_unlock_irqrestore(&np->lock, flag);
989
990 dev->if_port = 0;
991
992 dev->trans_start = jiffies;
993 np->stats.tx_errors++;
994 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
995 netif_wake_queue(dev);
996 }
997 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
998 tasklet_enable(&np->tx_tasklet);
999}
1000
1001
1002/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1003static void init_ring(struct net_device *dev)
1004{
1005 struct netdev_private *np = netdev_priv(dev);
1006 int i;
1007
1008 np->cur_rx = np->cur_tx = 0;
1009 np->dirty_rx = np->dirty_tx = 0;
1010 np->cur_task = 0;
1011
1012 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1013
1014 /* Initialize all Rx descriptors. */
1015 for (i = 0; i < RX_RING_SIZE; i++) {
1016 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1017 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1018 np->rx_ring[i].status = 0;
1019 np->rx_ring[i].frag[0].length = 0;
1020 np->rx_skbuff[i] = NULL;
1021 }
1022
1023 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1024 for (i = 0; i < RX_RING_SIZE; i++) {
1025 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1026 np->rx_skbuff[i] = skb;
1027 if (skb == NULL)
1028 break;
1029 skb->dev = dev; /* Mark as being used by this device. */
1030 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1031 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1032			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1033				PCI_DMA_FROMDEVICE));
1034 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1035 }
1036 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1037
1038 for (i = 0; i < TX_RING_SIZE; i++) {
1039 np->tx_skbuff[i] = NULL;
1040 np->tx_ring[i].status = 0;
1041 }
1042 return;
1043}
1044
1045static void tx_poll (unsigned long data)
1046{
1047 struct net_device *dev = (struct net_device *)data;
1048 struct netdev_private *np = netdev_priv(dev);
1049 unsigned head = np->cur_task % TX_RING_SIZE;
1050 struct netdev_desc *txdesc =
1051 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1052
1053 /* Chain the next pointer */
1054 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1055 int entry = np->cur_task % TX_RING_SIZE;
1056 txdesc = &np->tx_ring[entry];
1057 if (np->last_tx) {
1058 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1059 entry*sizeof(struct netdev_desc));
1060 }
1061 np->last_tx = txdesc;
1062 }
1063 /* Indicate the latest descriptor of tx ring */
1064 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1065
1066 if (ioread32 (np->base + TxListPtr) == 0)
1067 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1068 np->base + TxListPtr);
1069 return;
1070}
1071
1072static int
1073start_tx (struct sk_buff *skb, struct net_device *dev)
1074{
1075 struct netdev_private *np = netdev_priv(dev);
1076 struct netdev_desc *txdesc;
1077 unsigned entry;
1078
1079 /* Calculate the next Tx descriptor entry. */
1080 entry = np->cur_tx % TX_RING_SIZE;
1081 np->tx_skbuff[entry] = skb;
1082 txdesc = &np->tx_ring[entry];
1083
1084 txdesc->next_desc = 0;
1085 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1086 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1087 skb->len,
1088 PCI_DMA_TODEVICE));
1089 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1090
1091 /* Increment cur_tx before tasklet_schedule() */
1092 np->cur_tx++;
1093 mb();
1094 /* Schedule a tx_poll() task */
1095 tasklet_schedule(&np->tx_tasklet);
1096
1097 /* On some architectures: explicitly flush cache lines here. */
1098 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1099 && !netif_queue_stopped(dev)) {
1100 /* do nothing */
1101 } else {
1102 netif_stop_queue (dev);
1103 }
1104 dev->trans_start = jiffies;
1105 if (netif_msg_tx_queued(np)) {
1106 printk (KERN_DEBUG
1107 "%s: Transmit frame #%d queued in slot %d.\n",
1108 dev->name, np->cur_tx, entry);
1109 }
1110 return 0;
1111}
1112
1113/* Reset hardware tx and free all of tx buffers */
1114static int
1115reset_tx (struct net_device *dev)
1116{
1117 struct netdev_private *np = netdev_priv(dev);
1118 void __iomem *ioaddr = np->base;
1119 struct sk_buff *skb;
1120 int i;
1121 int irq = in_interrupt();
1122
1123 /* Reset tx logic, TxListPtr will be cleaned */
1124 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1125 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1126 ioaddr + ASICCtrl + 2);
1127 for (i=50; i > 0; i--) {
1128 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1129 break;
1130 mdelay(1);
1131 }
1132 /* free all tx skbuff */
1133 for (i = 0; i < TX_RING_SIZE; i++) {
1134 skb = np->tx_skbuff[i];
1135 if (skb) {
1136 pci_unmap_single(np->pci_dev,
1137 np->tx_ring[i].frag[0].addr, skb->len,
1138 PCI_DMA_TODEVICE);
1139 if (irq)
1140 dev_kfree_skb_irq (skb);
1141 else
1142 dev_kfree_skb (skb);
1143 np->tx_skbuff[i] = NULL;
1144 np->stats.tx_dropped++;
1145 }
1146 }
1147 np->cur_tx = np->dirty_tx = 0;
1148 np->cur_task = 0;
1149 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1150 return 0;
1151}
1152
1153/* The interrupt handler cleans up after the Tx thread,
1154   and schedules the Rx poll tasklet. */
1155static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1156{
1157 struct net_device *dev = (struct net_device *)dev_instance;
1158 struct netdev_private *np = netdev_priv(dev);
1159 void __iomem *ioaddr = np->base;
1160 int hw_frame_id;
1161 int tx_cnt;
1162 int tx_status;
1163 int handled = 0;
1164
1165
1166 do {
1167 int intr_status = ioread16(ioaddr + IntrStatus);
1168 iowrite16(intr_status, ioaddr + IntrStatus);
1169
1170 if (netif_msg_intr(np))
1171 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1172 dev->name, intr_status);
1173
1174 if (!(intr_status & DEFAULT_INTR))
1175 break;
1176
1177 handled = 1;
1178
1179 if (intr_status & (IntrRxDMADone)) {
1180 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1181 ioaddr + IntrEnable);
1182 if (np->budget < 0)
1183 np->budget = RX_BUDGET;
1184 tasklet_schedule(&np->rx_tasklet);
1185 }
1186 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1187 tx_status = ioread16 (ioaddr + TxStatus);
1188 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1189 if (netif_msg_tx_done(np))
1190 printk
1191 ("%s: Transmit status is %2.2x.\n",
1192 dev->name, tx_status);
1193 if (tx_status & 0x1e) {
1194 np->stats.tx_errors++;
1195 if (tx_status & 0x10)
1196 np->stats.tx_fifo_errors++;
1197 if (tx_status & 0x08)
1198 np->stats.collisions++;
1199 if (tx_status & 0x02)
1200 np->stats.tx_window_errors++;
1201 /* This reset has not been verified!. */
1202 if (tx_status & 0x10) { /* Reset the Tx. */
1203 np->stats.tx_fifo_errors++;
1204 spin_lock(&np->lock);
1205 reset_tx(dev);
1206 spin_unlock(&np->lock);
1207 }
1208 if (tx_status & 0x1e) /* Restart the Tx. */
1209 iowrite16 (TxEnable,
1210 ioaddr + MACCtrl1);
1211 }
1212 /* Yup, this is a documentation bug. It cost me *hours*. */
1213 iowrite16 (0, ioaddr + TxStatus);
1214 if (tx_cnt < 0) {
1215 iowrite32(5000, ioaddr + DownCounter);
1216 break;
1217 }
1218 tx_status = ioread16 (ioaddr + TxStatus);
1219 }
1220 hw_frame_id = (tx_status >> 8) & 0xff;
1221 } else {
1222 hw_frame_id = ioread8(ioaddr + TxFrameId);
1223 }
1224
1225 if (np->pci_rev_id >= 0x14) {
1226 spin_lock(&np->lock);
1227 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1228 int entry = np->dirty_tx % TX_RING_SIZE;
1229 struct sk_buff *skb;
1230 int sw_frame_id;
1231 sw_frame_id = (le32_to_cpu(
1232 np->tx_ring[entry].status) >> 2) & 0xff;
1233 if (sw_frame_id == hw_frame_id &&
1234 !(le32_to_cpu(np->tx_ring[entry].status)
1235 & 0x00010000))
1236 break;
1237 if (sw_frame_id == (hw_frame_id + 1) %
1238 TX_RING_SIZE)
1239 break;
1240 skb = np->tx_skbuff[entry];
1241 /* Free the original skb. */
1242 pci_unmap_single(np->pci_dev,
1243 np->tx_ring[entry].frag[0].addr,
1244 skb->len, PCI_DMA_TODEVICE);
1245 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1246 np->tx_skbuff[entry] = NULL;
1247 np->tx_ring[entry].frag[0].addr = 0;
1248 np->tx_ring[entry].frag[0].length = 0;
1249 }
1250 spin_unlock(&np->lock);
1251 } else {
1252 spin_lock(&np->lock);
1253 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1254 int entry = np->dirty_tx % TX_RING_SIZE;
1255 struct sk_buff *skb;
1256 if (!(le32_to_cpu(np->tx_ring[entry].status)
1257 & 0x00010000))
1258 break;
1259 skb = np->tx_skbuff[entry];
1260 /* Free the original skb. */
1261 pci_unmap_single(np->pci_dev,
1262 np->tx_ring[entry].frag[0].addr,
1263 skb->len, PCI_DMA_TODEVICE);
1264 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1265 np->tx_skbuff[entry] = NULL;
1266 np->tx_ring[entry].frag[0].addr = 0;
1267 np->tx_ring[entry].frag[0].length = 0;
1268 }
1269 spin_unlock(&np->lock);
1270 }
1271
1272 if (netif_queue_stopped(dev) &&
1273 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1274 /* The ring is no longer full, clear busy flag. */
1275 netif_wake_queue (dev);
1276 }
1277 /* Abnormal error summary/uncommon events handlers. */
1278 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1279 netdev_error(dev, intr_status);
1280 } while (0);
1281 if (netif_msg_intr(np))
1282 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1283 dev->name, ioread16(ioaddr + IntrStatus));
1284 return IRQ_RETVAL(handled);
1285}
1286
1287static void rx_poll(unsigned long data)
1288{
1289 struct net_device *dev = (struct net_device *)data;
1290 struct netdev_private *np = netdev_priv(dev);
1291 int entry = np->cur_rx % RX_RING_SIZE;
1292 int boguscnt = np->budget;
1293 void __iomem *ioaddr = np->base;
1294 int received = 0;
1295
1296 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1297 while (1) {
1298 struct netdev_desc *desc = &(np->rx_ring[entry]);
1299 u32 frame_status = le32_to_cpu(desc->status);
1300 int pkt_len;
1301
1302 if (--boguscnt < 0) {
1303 goto not_done;
1304 }
1305 if (!(frame_status & DescOwn))
1306 break;
1307 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1308 if (netif_msg_rx_status(np))
1309 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1310 frame_status);
1311 if (frame_status & 0x001f4000) {
1312			/* There was an error. */
1313 if (netif_msg_rx_err(np))
1314 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1315 frame_status);
1316 np->stats.rx_errors++;
1317 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1318 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1319 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1320 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1321 if (frame_status & 0x00100000) {
1322 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1323 " status %8.8x.\n",
1324 dev->name, frame_status);
1325 }
1326 } else {
1327 struct sk_buff *skb;
1328#ifndef final_version
1329 if (netif_msg_rx_status(np))
1330 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1331 ", bogus_cnt %d.\n",
1332 pkt_len, boguscnt);
1333#endif
1334 /* Check if the packet is long enough to accept without copying
1335 to a minimally-sized skbuff. */
1336 if (pkt_len < rx_copybreak
1337 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1338 skb->dev = dev;
1339 skb_reserve(skb, 2); /* 16 byte align the IP header */
1340 pci_dma_sync_single_for_cpu(np->pci_dev,
1341 desc->frag[0].addr,
1342 np->rx_buf_sz,
1343 PCI_DMA_FROMDEVICE);
1344
1345				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1346				pci_dma_sync_single_for_device(np->pci_dev,
1347 desc->frag[0].addr,
1348 np->rx_buf_sz,
1349 PCI_DMA_FROMDEVICE);
1350 skb_put(skb, pkt_len);
1351 } else {
1352 pci_unmap_single(np->pci_dev,
1353 desc->frag[0].addr,
1354 np->rx_buf_sz,
1355 PCI_DMA_FROMDEVICE);
1356 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1357 np->rx_skbuff[entry] = NULL;
1358 }
1359 skb->protocol = eth_type_trans(skb, dev);
1360 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1361 netif_rx(skb);
1362 dev->last_rx = jiffies;
1363 }
1364 entry = (entry + 1) % RX_RING_SIZE;
1365 received++;
1366 }
1367 np->cur_rx = entry;
1368 refill_rx (dev);
1369 np->budget -= received;
1370 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1371 return;
1372
1373not_done:
1374 np->cur_rx = entry;
1375 refill_rx (dev);
1376 if (!received)
1377 received = 1;
1378 np->budget -= received;
1379 if (np->budget <= 0)
1380 np->budget = RX_BUDGET;
1381 tasklet_schedule(&np->rx_tasklet);
1382 return;
1383}
1384
1385static void refill_rx (struct net_device *dev)
1386{
1387 struct netdev_private *np = netdev_priv(dev);
1388 int entry;
1389 int cnt = 0;
1390
1391 /* Refill the Rx ring buffers. */
1392 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1393 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1394 struct sk_buff *skb;
1395 entry = np->dirty_rx % RX_RING_SIZE;
1396 if (np->rx_skbuff[entry] == NULL) {
1397 skb = dev_alloc_skb(np->rx_buf_sz);
1398 np->rx_skbuff[entry] = skb;
1399 if (skb == NULL)
1400 break; /* Better luck next round. */
1401 skb->dev = dev; /* Mark as being used by this device. */
1402 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1403 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1404				pci_map_single(np->pci_dev, skb->data,
1405					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1406 }
1407 /* Perhaps we need not reset this field. */
1408 np->rx_ring[entry].frag[0].length =
1409 cpu_to_le32(np->rx_buf_sz | LastFrag);
1410 np->rx_ring[entry].status = 0;
1411 cnt++;
1412 }
1413 return;
1414}
1415static void netdev_error(struct net_device *dev, int intr_status)
1416{
1417 struct netdev_private *np = netdev_priv(dev);
1418 void __iomem *ioaddr = np->base;
1419 u16 mii_ctl, mii_advertise, mii_lpa;
1420 int speed;
1421
1422 if (intr_status & LinkChange) {
1423 if (np->an_enable) {
1424 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1425 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1426 mii_advertise &= mii_lpa;
1427 printk (KERN_INFO "%s: Link changed: ", dev->name);
1428 if (mii_advertise & ADVERTISE_100FULL) {
1429 np->speed = 100;
1430 printk ("100Mbps, full duplex\n");
1431 } else if (mii_advertise & ADVERTISE_100HALF) {
1432 np->speed = 100;
1433 printk ("100Mbps, half duplex\n");
1434 } else if (mii_advertise & ADVERTISE_10FULL) {
1435 np->speed = 10;
1436 printk ("10Mbps, full duplex\n");
1437 } else if (mii_advertise & ADVERTISE_10HALF) {
1438 np->speed = 10;
1439 printk ("10Mbps, half duplex\n");
1440 } else
1441 printk ("\n");
1442
1443 } else {
1444 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1445 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1446 np->speed = speed;
1447 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1448 dev->name, speed);
1449 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1450 "full" : "half");
1451 }
1452 check_duplex (dev);
1453 if (np->flowctrl && np->mii_if.full_duplex) {
1454 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1455 ioaddr + MulticastFilter1+2);
1456 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1457 ioaddr + MACCtrl0);
1458 }
1459 }
1460 if (intr_status & StatsMax) {
1461 get_stats(dev);
1462 }
1463 if (intr_status & IntrPCIErr) {
1464 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1465 dev->name, intr_status);
1466 /* We must do a global reset of DMA to continue. */
1467 }
1468}
1469
1470static struct net_device_stats *get_stats(struct net_device *dev)
1471{
1472 struct netdev_private *np = netdev_priv(dev);
1473 void __iomem *ioaddr = np->base;
1474 int i;
1475
1476 /* We should lock this segment of code for SMP eventually, although
1477 the vulnerability window is very small and statistics are
1478 non-critical. */
1479	/* The chip only needs to report frames it silently dropped. */
1480 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1481 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1482 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1483 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1484 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1485 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1486 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1487 ioread8(ioaddr + StatsTxDefer);
1488 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1489 ioread8(ioaddr + i);
1490 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1491 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1492 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1493 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1494
1495 return &np->stats;
1496}
1497
1498static void set_rx_mode(struct net_device *dev)
1499{
1500 struct netdev_private *np = netdev_priv(dev);
1501 void __iomem *ioaddr = np->base;
1502 u16 mc_filter[4]; /* Multicast hash filter */
1503 u32 rx_mode;
1504 int i;
1505
1506 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1507 /* Unconditionally log net taps. */
1508 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1509 memset(mc_filter, 0xff, sizeof(mc_filter));
1510 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1511 } else if ((dev->mc_count > multicast_filter_limit)
1512 || (dev->flags & IFF_ALLMULTI)) {
1513 /* Too many to match, or accept all multicasts. */
1514 memset(mc_filter, 0xff, sizeof(mc_filter));
1515 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1516 } else if (dev->mc_count) {
1517 struct dev_mc_list *mclist;
1518 int bit;
1519 int index;
1520 int crc;
1521 memset (mc_filter, 0, sizeof (mc_filter));
1522 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1523 i++, mclist = mclist->next) {
1524 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1525 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1526 if (crc & 0x80000000) index |= 1 << bit;
1527 mc_filter[index/16] |= (1 << (index % 16));
1528 }
1529 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1530 } else {
1531 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1532 return;
1533 }
1534 if (np->mii_if.full_duplex && np->flowctrl)
1535 mc_filter[3] |= 0x0200;
1536
1537 for (i = 0; i < 4; i++)
1538 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1539 iowrite8(rx_mode, ioaddr + RxMode);
1540}
1541
1542static int __set_mac_addr(struct net_device *dev)
1543{
1544 struct netdev_private *np = netdev_priv(dev);
1545 u16 addr16;
1546
1547 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1548 iowrite16(addr16, np->base + StationAddr);
1549 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1550 iowrite16(addr16, np->base + StationAddr+2);
1551 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1552 iowrite16(addr16, np->base + StationAddr+4);
1553 return 0;
1554}
1555
1556static int check_if_running(struct net_device *dev)
1557{
1558 if (!netif_running(dev))
1559 return -EINVAL;
1560 return 0;
1561}
1562
1563static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1564{
1565 struct netdev_private *np = netdev_priv(dev);
1566 strcpy(info->driver, DRV_NAME);
1567 strcpy(info->version, DRV_VERSION);
1568 strcpy(info->bus_info, pci_name(np->pci_dev));
1569}
1570
1571static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1572{
1573 struct netdev_private *np = netdev_priv(dev);
1574 spin_lock_irq(&np->lock);
1575 mii_ethtool_gset(&np->mii_if, ecmd);
1576 spin_unlock_irq(&np->lock);
1577 return 0;
1578}
1579
1580static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1581{
1582 struct netdev_private *np = netdev_priv(dev);
1583 int res;
1584 spin_lock_irq(&np->lock);
1585 res = mii_ethtool_sset(&np->mii_if, ecmd);
1586 spin_unlock_irq(&np->lock);
1587 return res;
1588}
1589
1590static int nway_reset(struct net_device *dev)
1591{
1592 struct netdev_private *np = netdev_priv(dev);
1593 return mii_nway_restart(&np->mii_if);
1594}
1595
1596static u32 get_link(struct net_device *dev)
1597{
1598 struct netdev_private *np = netdev_priv(dev);
1599 return mii_link_ok(&np->mii_if);
1600}
1601
1602static u32 get_msglevel(struct net_device *dev)
1603{
1604 struct netdev_private *np = netdev_priv(dev);
1605 return np->msg_enable;
1606}
1607
1608static void set_msglevel(struct net_device *dev, u32 val)
1609{
1610 struct netdev_private *np = netdev_priv(dev);
1611 np->msg_enable = val;
1612}
1613
1614static struct ethtool_ops ethtool_ops = {
1615 .begin = check_if_running,
1616 .get_drvinfo = get_drvinfo,
1617 .get_settings = get_settings,
1618 .set_settings = set_settings,
1619 .nway_reset = nway_reset,
1620 .get_link = get_link,
1621 .get_msglevel = get_msglevel,
1622 .set_msglevel = set_msglevel,
1623	.get_perm_addr = ethtool_op_get_perm_addr,
1624};
1625
1626static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1627{
1628 struct netdev_private *np = netdev_priv(dev);
1629 void __iomem *ioaddr = np->base;
1630 int rc;
1631 int i;
1632
1633 if (!netif_running(dev))
1634 return -EINVAL;
1635
1636 spin_lock_irq(&np->lock);
1637 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1638 spin_unlock_irq(&np->lock);
1639 switch (cmd) {
1640 case SIOCDEVPRIVATE:
1641 for (i=0; i<TX_RING_SIZE; i++) {
1642 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1643 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1644 le32_to_cpu(np->tx_ring[i].next_desc),
1645 le32_to_cpu(np->tx_ring[i].status),
1646 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1647 & 0xff,
1648 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1649 le32_to_cpu(np->tx_ring[i].frag[0].length));
1650 }
1651 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1652 ioread32(np->base + TxListPtr),
1653 netif_queue_stopped(dev));
1654 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1655 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1656 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1657 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1658 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1659 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1660 return 0;
1661 }
1662
1663
1664 return rc;
1665}
1666
1667static int netdev_close(struct net_device *dev)
1668{
1669 struct netdev_private *np = netdev_priv(dev);
1670 void __iomem *ioaddr = np->base;
1671 struct sk_buff *skb;
1672 int i;
1673
1674 netif_stop_queue(dev);
1675
1676 if (netif_msg_ifdown(np)) {
1677 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1678 "Rx %4.4x Int %2.2x.\n",
1679 dev->name, ioread8(ioaddr + TxStatus),
1680 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1681 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1682 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1683 }
1684
1685 /* Disable interrupts by clearing the interrupt mask. */
1686 iowrite16(0x0000, ioaddr + IntrEnable);
1687
1688 /* Stop the chip's Tx and Rx processes. */
1689 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1690
1691 /* Wait and kill tasklet */
1692 tasklet_kill(&np->rx_tasklet);
1693 tasklet_kill(&np->tx_tasklet);
1694
1695#ifdef __i386__
1696 if (netif_msg_hw(np)) {
1697 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1698 (int)(np->tx_ring_dma));
1699 for (i = 0; i < TX_RING_SIZE; i++)
1700 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1701 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1702 np->tx_ring[i].frag[0].length);
1703 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1704 (int)(np->rx_ring_dma));
1705 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1706 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1707 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1708 np->rx_ring[i].frag[0].length);
1709 }
1710 }
1711#endif /* __i386__ debugging only */
1712
1713 free_irq(dev->irq, dev);
1714
1715 del_timer_sync(&np->timer);
1716
1717 /* Free all the skbuffs in the Rx queue. */
1718 for (i = 0; i < RX_RING_SIZE; i++) {
1719 np->rx_ring[i].status = 0;
1720 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1721 skb = np->rx_skbuff[i];
1722 if (skb) {
1723 pci_unmap_single(np->pci_dev,
1724 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1725 PCI_DMA_FROMDEVICE);
1726 dev_kfree_skb(skb);
1727 np->rx_skbuff[i] = NULL;
1728 }
1729 }
1730 for (i = 0; i < TX_RING_SIZE; i++) {
1731 skb = np->tx_skbuff[i];
1732 if (skb) {
1733 pci_unmap_single(np->pci_dev,
1734 np->tx_ring[i].frag[0].addr, skb->len,
1735 PCI_DMA_TODEVICE);
1736 dev_kfree_skb(skb);
1737 np->tx_skbuff[i] = NULL;
1738 }
1739 }
1740
1741 return 0;
1742}
1743
1744static void __devexit sundance_remove1 (struct pci_dev *pdev)
1745{
1746 struct net_device *dev = pci_get_drvdata(pdev);
1747
1748 if (dev) {
1749 struct netdev_private *np = netdev_priv(dev);
1750
1751 unregister_netdev(dev);
1752 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1753 np->rx_ring_dma);
1754 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1755 np->tx_ring_dma);
1756 pci_iounmap(pdev, np->base);
1757 pci_release_regions(pdev);
1758 free_netdev(dev);
1759 pci_set_drvdata(pdev, NULL);
1760 }
1761}
1762
1763static struct pci_driver sundance_driver = {
1764 .name = DRV_NAME,
1765 .id_table = sundance_pci_tbl,
1766 .probe = sundance_probe1,
1767 .remove = __devexit_p(sundance_remove1),
1768};
1769
1770static int __init sundance_init(void)
1771{
1772/* when a module, this is printed whether or not devices are found in probe */
1773#ifdef MODULE
1774 printk(version);
1775#endif
1776 return pci_module_init(&sundance_driver);
1777}
1778
1779static void __exit sundance_exit(void)
1780{
1781 pci_unregister_driver(&sundance_driver);
1782}
1783
1784module_init(sundance_init);
1785module_exit(sundance_exit);
1786
1787