/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
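/*
 * Worked example of the power-of-two note above: ring-index arithmetic
 * such as
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *
 * (used throughout this driver) reduces to a single AND with
 * (TX_RING_SIZE - 1); no division instruction is generated.
 */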

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
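/*
 * Loading example (hypothetical values, using only the parameters declared
 * above): force the first board to 100 Mbps full duplex with flow control,
 * and copy received frames shorter than 256 bytes into fresh skbuffs:
 *
 *	modprobe sundance media=100mbps_fd flowctrl=1 rx_copybreak=256
 */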

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
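As a sketch, the receive paths below (init_ring() and refill_rx())
implement this offset as, roughly:

	skb = dev_alloc_skb(np->rx_buf_sz);
	skb_reserve(skb, 2);
	desc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev,
			skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE));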

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
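/*
 * For orientation, a minimal sketch of how start_tx() below fills one
 * single-fragment Tx descriptor (all fields little-endian; dma_handle
 * stands for the pci_map_single() result):
 *
 *	txdesc->next_desc = 0;
 *	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
 *	txdesc->frag[0].addr = cpu_to_le32(dma_handle);
 *	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
 */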

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;		/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;		/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int change_mtu(struct net_device *dev, int new_mtu);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;


/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31).
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
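/* A dummy read of the MII control register both flushes the posted PCI
   write and serves as the delay between MDIO clock edges below. */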
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
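	/* Full duplex if 100BASE-TX-FD (0x0100) is in the common ability
	   set, or if 10BASE-T-FD (0x0040) is set while neither 100 Mbps
	   ability is (mask 0x01C0). */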
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1 ||
	    netif_queue_stopped(dev))
		netif_stop_queue (dev);
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reset hardware Tx and free all Tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread
   and schedules Rx thread work. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						unsigned short txthreshold;

						txthreshold = ioread16 (ioaddr + TxStartThresh);
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						iowrite16 (txthreshold, ioaddr + TxStartThresh);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. */
					iowrite16 (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    desc->frag[0].addr,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
							       desc->frag[0].addr,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");

		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip need only report frames silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
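	/* The event counters below are read only to discard them; the
	   statistics registers appear to be clear-on-read. */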
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];		/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
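		/* Hash each address into one of 64 filter bins: the index is
		   the bit-reversed top six bits of the little-endian CRC,
		   spread across the four 16-bit filter registers. */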
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}


	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);
