/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html


	Version LK1.01a (jgarzik):
	- Replace some MII-related magic numbers with constants

	Version LK1.02 (D-Link):
	- Add new board to PCI ID list
	- Fix multicast bug

	Version LK1.03 (D-Link):
	- New Rx scheme, reduce Rx congestion
	- Option to disable flow control

	Version LK1.04 (D-Link):
	- Tx timeout recovery
	- More support for ethtool.

	Version LK1.04a:
	- Remove unused/constant members from struct pci_id_info
	  (which then allows removal of 'drv_flags' from private struct)
	  (jgarzik)
	- If no phy is found, fail to load that board (jgarzik)
	- Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
	  default to not needed.  (Donald Becker)

	Version LK1.04b:
	- Remove mii_preamble_required module parameter (Donald Becker)
	- Add per-interface mii_preamble_required (setting is autodetected)
	  (Donald Becker)
	- Remove unnecessary cast from void pointer (jgarzik)
	- Re-align comments in private struct (jgarzik)

	Version LK1.04c (jgarzik):
	- Support bitmapped message levels (NETIF_MSG_xxx), and the
	  two ethtool ioctls that get/set them
	- Don't hand-code MII ethtool support, use standard API/lib

	Version LK1.04d:
	- Merge from Donald Becker's sundance.c: (Jason Lunz)
	  * proper support for variably-sized MTUs
	  * default to PIO, to fix chip bugs
	- Add missing unregister_netdev (Jason Lunz)
	- Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
	- Better rx buf size calculation (Donald Becker)

	Version LK1.05 (D-Link):
	- Fix DFE-580TX packet drop issue (for DL10050C)
	- Fix reset_tx logic

	Version LK1.06 (D-Link):
	- Fix crash while unloading driver

	Version LK1.06b (D-Link):
	- New tx scheme, adaptive tx_coalesce

	Version LK1.07 (D-Link):
	- Fix tx bugs on big-endian machines
	- Remove the unused max_interrupt_work module parameter; the new
	  NAPI-like rx scheme doesn't need it.
	- Remove the redundant get_stats() call in intr_handler(); those
	  I/O accesses could affect performance on ARM-based systems
	- Add Linux software VLAN support

	Version LK1.08 (Philippe De Muyter phdm@macqel.be):
	- Fix the custom MAC address bug
	  (the StationAddr register only accepts word writes)

	Version LK1.09 (D-Link):
	- Fix the flowctrl bug.
	- Set Pause bit in MII ANAR if flow control enabled.

	Version LK1.09a (ICPlus):
	- Add a delay when reading the contents of the EEPROM

	Version LK1.10 (Philippe De Muyter phdm@macqel.be):
	- Make 'unblock interface after Tx underrun' work

	Version LK1.11 (Pedro Alejandro Lopez-Valencia palopezv at gmail.com):
	- Add support for the IC Plus Corporation IP100A chipset
*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.01+LK1.11"
#define DRV_RELDATE	"14-Jun-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl=1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
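/* For example, a hypothetical invocation (names and values chosen for
   illustration only) such as
	modprobe sundance media=100mbps_fd,3 flowctrl=0
   would force the first card to 100Mbps full duplex, the second to
   100Mbps half duplex, and disable flow control. */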
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
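/* To make the power-of-two remark above concrete: with TX_RING_SIZE == 32,
   a ring-index wrap such as
	entry = np->cur_tx % TX_RING_SIZE;
   can be compiled down to the simple mask
	entry = np->cur_tx & (TX_RING_SIZE - 1);
   so no division is needed on the hot path. */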

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

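As a concrete sketch, mirroring what init_ring() below actually does, the
Rx ring is formed by pointing each descriptor's next_desc at the following
descriptor and wrapping the last one back to the first:

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i + 1) % RX_RING_SIZE) * sizeof(*np->rx_ring));
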
IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

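In outline, the copybreak test that rx_poll() below performs looks like
this (simplified sketch):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		... copy the frame into the fresh small skb,
		    leaving the full-sized ring buffer in place ...
	} else {
		... unmap and pass the ring skb itself up the stack;
		    refill_rx() replaces it later ...
	}
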
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

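Concretely, every receive buffer below is prepared with

	skb_reserve(skb, 2);

so that the 14-byte Ethernet header starts at offset 2 and the IP header
falls on a 16-byte boundary.
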
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static struct pci_device_id sundance_pci_tbl[] = {
	{0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
	{0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
	{0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
	{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
	{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
	{0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{NULL,},	/* 0 terminated list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;		/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;		/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32-bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;


/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;	/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31).
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
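/* The "delay" above is a dummy PCI read of EECtrl: a PCI read forces any
   posted writes to complete and itself takes on the order of a
   microsecond, which is presumably the intent here. */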
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
	for (i=50; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx tasklet to handle received frames. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						unsigned short txthreshold;

						txthreshold = ioread16 (ioaddr + TxStartThresh);
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						iowrite16 (txthreshold, ioaddr + TxStartThresh);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. */
					iowrite16 (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet.  Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    desc->frag[0].addr,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
							       desc->frag[0].addr,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");

		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];	/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
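		/* Hash each multicast address with the little-endian
		   Ethernet CRC; the six most significant CRC bits index
		   the 64-bit filter, stored as four 16-bit registers. */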
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

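/* The StationAddr register only accepts 16-bit writes (see the LK1.08
   note above), so the MAC address is programmed as three halfwords. */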
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}


	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);