/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
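
/* Typical module usage (an illustrative sketch, not from the original
 * sources; the option values below are examples only):
 *
 *	modprobe sundance debug=3 flowctrl=1 media=100mbps_fd,autosense
 *
 * media[] entries apply per card in probe order, so here the first card
 * is forced to 100 Mbps full duplex while the second autonegotiates.
 */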

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
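
As a rough sketch (not a verbatim excerpt), the receive decision in
rx_poll() below reduces to:

	if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2))) {
		skb_reserve(skb, 2);	-- 16-byte align the IP header
		... sync the DMA buffer, copy the frame into the small skb ...
	} else {
		... unmap and hand the original ring skb up; refill later ...
	}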

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};
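
/* An illustrative sketch (an assumed layout, following init_ring() below):
 * a two-entry Rx ring chains each descriptor's next_desc to the DMA address
 * of its successor, with the last entry pointing back at entry 0:
 *
 *	ring[0].next_desc = cpu_to_le32(ring_dma + 1 * sizeof(struct netdev_desc));
 *	ring[1].next_desc = cpu_to_le32(ring_dma + 0 * sizeof(struct netdev_desc));
 *
 * Each frag[0].addr holds the PCI-mapped buffer address, and frag[0].length
 * carries the buffer size ORed with LastFrag.
 */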

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
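
/* How these bits are used here (a reading of the code below, not of the
 * datasheet): on Rx, the chip sets DescOwn in a completed descriptor's
 * status, so rx_poll() stops at the first entry without it.  On Tx,
 * start_tx() writes (entry << 2) | DisableAlign into status, and tx_poll()
 * ORs DescIntrOnTx into the last queued descriptor to request an interrupt.
 * LastFrag is ORed into frag[0].length to mark the final fragment.
 */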

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
451
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452static int __devinit sundance_probe1 (struct pci_dev *pdev,
453 const struct pci_device_id *ent)
454{
455 struct net_device *dev;
456 struct netdev_private *np;
457 static int card_idx;
458 int chip_idx = ent->driver_data;
459 int irq;
460 int i;
461 void __iomem *ioaddr;
462 u16 mii_ctl;
463 void *ring_space;
464 dma_addr_t ring_dma;
465#ifdef USE_IO_OPS
466 int bar = 0;
467#else
468 int bar = 1;
469#endif
John W. Linville67ec2f82005-10-18 21:31:01 -0400470 int phy, phy_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472
473/* when built into the kernel, we only print version if device is found */
474#ifndef MODULE
475 static int printed_version;
476 if (!printed_version++)
477 printk(version);
478#endif
479
	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31).
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
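
/* For reference (standard IEEE 802.3 clause 22 framing, which the helpers
   below bit-bang): a read shifts out a 16-bit command of start (01),
   opcode (10), a 5-bit PHY address and a 5-bit register address, then
   clocks in a turnaround plus 16 data bits; a write shifts out a single
   32-bit word of start (01), opcode (01), the two address fields, a 10
   turnaround, and the 16 data bits.  mdio_sync() first sends the 32-one-bit
   preamble whenever np->mii_preamble_required is set. */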
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;		/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
	    && !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx work via the rx_tasklet. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx.  Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    desc->frag[0].addr,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
							       desc->frag[0].addr,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");

		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
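
/* A worked reading of the hash above (based on this code, not the
 * datasheet): ether_crc_le() yields a 32-bit CRC of the multicast address,
 * and its top 6 bits, consumed MSB-first as the loop shifts crc left,
 * select one of the 64 filter bits.  For example, a CRC of 0x80000001
 * sets only index bit 0, so index = 1 and mc_filter[0] gets bit 1 set.
 */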

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}
1531
1532static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1533{
1534 struct netdev_private *np = netdev_priv(dev);
1535 strcpy(info->driver, DRV_NAME);
1536 strcpy(info->version, DRV_VERSION);
1537 strcpy(info->bus_info, pci_name(np->pci_dev));
1538}
1539
1540static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1541{
1542 struct netdev_private *np = netdev_priv(dev);
1543 spin_lock_irq(&np->lock);
1544 mii_ethtool_gset(&np->mii_if, ecmd);
1545 spin_unlock_irq(&np->lock);
1546 return 0;
1547}
1548
1549static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1550{
1551 struct netdev_private *np = netdev_priv(dev);
1552 int res;
1553 spin_lock_irq(&np->lock);
1554 res = mii_ethtool_sset(&np->mii_if, ecmd);
1555 spin_unlock_irq(&np->lock);
1556 return res;
1557}
1558
1559static int nway_reset(struct net_device *dev)
1560{
1561 struct netdev_private *np = netdev_priv(dev);
1562 return mii_nway_restart(&np->mii_if);
1563}
1564
1565static u32 get_link(struct net_device *dev)
1566{
1567 struct netdev_private *np = netdev_priv(dev);
1568 return mii_link_ok(&np->mii_if);
1569}
1570
1571static u32 get_msglevel(struct net_device *dev)
1572{
1573 struct netdev_private *np = netdev_priv(dev);
1574 return np->msg_enable;
1575}
1576
1577static void set_msglevel(struct net_device *dev, u32 val)
1578{
1579 struct netdev_private *np = netdev_priv(dev);
1580 np->msg_enable = val;
1581}
1582
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

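/* Handle MII ioctls via the generic helper. As a debugging aid,
   SIOCDEVPRIVATE additionally dumps the Tx descriptor ring and the
   driver's queue bookkeeping to the kernel log. */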
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
			       (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
			       le32_to_cpu(np->tx_ring[i].next_desc),
			       le32_to_cpu(np->tx_ring[i].status),
			       (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
			       le32_to_cpu(np->tx_ring[i].frag[0].addr),
			       le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
		       ioread32(np->base + TxListPtr),
		       netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
		       np->cur_tx, np->cur_tx % TX_RING_SIZE,
		       np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}

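/* Bring the interface down: kill the Rx/Tx tasklets, stop the queue,
   mask interrupts, halt the DMA engines and the MAC, reset the chip,
   and finally release the IRQ, the timer, and the ring buffers. */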
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for any running Rx/Tx tasklets to finish, then kill them. */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
		       "Rx %4.4x Int %2.2x.\n",
		       dev->name, ioread8(ioaddr + TxStatus),
		       ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so the resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait up to two seconds for the DMA engines to go idle. */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
		  ioaddr + ASICCtrl + 2);

	/* Wait up to two seconds for the reset to complete. */
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
		       (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "   #%d desc. %4.4x %8.8x %8.8x.\n",
			       i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
			       np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
		       (int)(np->rx_ring_dma));
		/* Only the first few Rx descriptors are interesting here. */
		for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
			printk(KERN_DEBUG "   #%d desc. %4.4x %4.4x %8.8x\n",
			       i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
			       np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
					 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
					 np->tx_ring[i].frag[0].addr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

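/* PCI removal callback: unregister the net device, then release the
   descriptor rings, the I/O mapping, and the PCI regions in reverse
   order of allocation. */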
static void __devexit sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
				    np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
				    np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

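/* Module plumbing: sundance_init()/sundance_exit() simply register and
   unregister the PCI driver; probing of individual devices happens in
   sundance_probe1() via the PCI core. */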
static struct pci_driver sundance_driver = {
	.name = DRV_NAME,
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* When built as a module, this is printed whether or not devices are
   found in probe. */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);