/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
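
/* For reference, a minimal sketch (not necessarily this driver's exact
   code; see set_rx_mode() later in the file) of the usual CRC-based
   multicast hash on this chip family: the low six bits of the Ethernet
   CRC of a multicast address ha->addr select one bit in a 64-bit filter
   split across four 16-bit MulticastFilter registers:

	u16 mc_filter[4] = { 0, 0, 0, 0 };
	int bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
	mc_filter[bit_nr >> 4] |= 1 << (bit_nr & 15);
 */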

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
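
/* Illustration of the point above (plain arithmetic, not driver code):
   because TX_RING_SIZE is a power of two, the compiler can lower

	entry = np->cur_tx % TX_RING_SIZE;

   to the equivalent single-instruction form

	entry = np->cur_tx & (TX_RING_SIZE - 1);

   which is why the ring indices below are free-running unsigned counters. */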

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
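
/* Example of setting the parameters above at load time (hypothetical
   invocation; adjust values to taste):

	modprobe sundance media=100mbps_fd flowctrl=1 debug=2
 */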

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.  (A sketch of the copybreak decision follows
this comment block.)

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/
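
/* The copybreak decision sketched above, condensed from rx_poll() below
   (the "..." arguments stand for the DMA sync parameters shown there):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);			16-byte align the IP header
		dma_sync_single_for_cpu(...);
		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
		dma_sync_single_for_device(...);
		skb_put(skb, pkt_len);			copy path: ring buffer is reused
	} else {
		dma_unmap_single(...);
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;		zero-copy path: skb goes upstream
	}
 */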

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The names can only partially document the semantics, and they make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x)	((x) + 2)

enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
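
/* Because every descriptor field is a __le32, all stores go through
   cpu_to_le32() and all loads through le32_to_cpu(), as in init_ring()
   and rx_poll() below:

	np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	u32 frame_status = le32_to_cpu(desc->status);

   These are no-ops on little-endian CPUs and byte-swaps on big-endian
   ones, which is the conversion the note above refers to. */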

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32-bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

Arnaldo Carvalho de Melo0d615ec2006-01-26 22:01:38 -0500575 /*
576 * It seems some phys doesn't deal well with address 0 being accessed
Jeff Garzikac1d49f2007-09-29 01:10:14 -0400577 * first
Arnaldo Carvalho de Melo0d615ec2006-01-26 22:01:38 -0500578 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
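
/* For reference when reading mdio_read()/mdio_write() below: an IEEE
   802.3 clause-22 management frame is start (01), opcode (10 = read,
   01 = write), a 5-bit PHY address, a 5-bit register address, a 2-bit
   turnaround, then 16 data bits.  The read command (0xf6 << 10) thus
   shifts out two extra sync ones followed by 01 10 ahead of the two
   addresses, and the write command (0x5002 << 16) encodes 01 01 plus
   the 10 turnaround ahead of the data word. */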

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}
825
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826static int netdev_open(struct net_device *dev)
827{
828 struct netdev_private *np = netdev_priv(dev);
829 void __iomem *ioaddr = np->base;
Jesse Huangacd70c22006-10-20 14:42:13 -0700830 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 int i;
832
833 /* Do we need to reset the chip??? */
834
Joe Perchesa0607fd2009-11-18 23:29:17 -0800835 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836 if (i)
837 return i;
838
839 if (netif_msg_ifup(np))
840 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
841 dev->name, dev->irq);
842 init_ring(dev);
843
844 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
845 /* The Tx list pointer is written as packets are queued. */
846
847 /* Initialize other registers. */
848 __set_mac_addr(dev);
849#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
850 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
851#else
852 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
853#endif
854 if (dev->mtu > 2047)
855 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
856
857 /* Configure the PCI bus bursts and FIFO thresholds. */
858
859 if (dev->if_port == 0)
860 dev->if_port = np->default_port;
861
862 spin_lock_init(&np->mcastlock);
863
864 set_rx_mode(dev);
865 iowrite16(0, ioaddr + IntrEnable);
866 iowrite16(0, ioaddr + DownCounter);
867 /* Set the chip to poll every N*320nsec. */
868 iowrite8(100, ioaddr + RxDMAPollPeriod);
869 iowrite8(127, ioaddr + TxDMAPollPeriod);
870 /* Fix DFE-580TX packet drop issue */
Auke Kok44c10132007-06-08 15:46:36 -0700871 if (np->pci_dev->revision >= 0x14)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872 iowrite8(0x01, ioaddr + DebugCtrl1);
873 netif_start_queue(dev);
874
Jesse Huangacd70c22006-10-20 14:42:13 -0700875 spin_lock_irqsave(&np->lock, flags);
876 reset_tx(dev);
877 spin_unlock_irqrestore(&np->lock, flags);
878
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
880
881 if (netif_msg_ifup(np))
882 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
883 "MAC Control %x, %4.4x %4.4x.\n",
884 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
885 ioread32(ioaddr + MACCtrl0),
886 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
887
888 /* Set the timer to check for link beat. */
889 init_timer(&np->timer);
890 np->timer.expires = jiffies + 3*HZ;
891 np->timer.data = (unsigned long)dev;
Joe Perchesc061b182010-08-23 18:20:03 +0000892 np->timer.function = netdev_timer; /* timer handler */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 add_timer(&np->timer);
894
895 /* Enable interrupts by setting the interrupt mask. */
896 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
897
898 return 0;
899}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
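	/* Full duplex if both sides advertise 100BASE-TX full duplex
	   (LPA_100FULL, 0x0100), or if 10BASE-T full duplex (0x0040) is
	   the only common ability once the 10/100 duplex bits (mask
	   0x01C0) are considered. */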
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
			    np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
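	/* cur_rx - dirty_rx counts slots still needing an skb; starting
	   dirty_rx at i - RX_RING_SIZE (a large unsigned value when some
	   allocations failed above) lets refill_rx() top the ring back up
	   later with the same unsigned-wraparound arithmetic. */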
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev,
		    txdesc->frag[0].addr))
			goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules an Rx tasklet to do the rest of the work. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
}
1436static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
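				/* Autonegotiation resolved the link: AND
				   our advertised abilities with the link
				   partner's (MII_LPA) and pick the best
				   common mode, checked here from 100/full
				   down to 10/half. */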
				mii_advertise = mdio_read(dev, np->phys[0],
							  MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
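			/* On a full-duplex link with flow control
			   requested, set bit 9 in the upper multicast
			   filter word (presumably this lets MAC
			   control/PAUSE frames through the Rx filter)
			   and enable flow control in MACCtrl0. */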
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
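	/* The hardware counters are narrow (8/16 bit) and are folded into
	   dev->stats on every read, which suggests they clear on read;
	   statlock serializes readers so two callers cannot each consume
	   and accumulate the same counter values. */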
	/* The chip only needs to report silently dropped frames. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll + single_coll + late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
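		/* Hash each address into the 64-bit filter: take the top
		   six bits of the little-endian CRC, reversed so that CRC
		   bit 31 becomes index bit 0, and set that bit in the four
		   16-bit filter words.  For example, if bits 31..26 of the
		   CRC are 110100, index = 001011b = 11, so bit 11 of
		   mc_filter[0] is set. */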
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

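	/* Program the station address as three little-endian 16-bit
	   words at StationAddr, StationAddr+2 and StationAddr+4. */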
	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static void get_strings(struct net_device *dev, u32 stringset,
		u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

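	/* Snapshot the hardware counters first, then emit the values in
	   exactly the order of the sundance_stats name table above --
	   ethtool matches strings to values by position. */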
	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for any running Rx/Tx tasklets to finish, then kill them */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so resources can be released safely */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

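	/* Quiesce before resetting: poll DMACtrl until bits 0xc000
	   (evidently the Tx/Rx DMA in-progress flags) clear, for up to
	   2 seconds; then issue a global reset through the high word of
	   ASICCtrl and poll ResetBusy until the reset completes. */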
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG " Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG " Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM

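/* Power management takes the simple route here: suspend fully closes a
   running interface and resume reopens it, rather than saving and
   restoring chip state around the sleep. */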
static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
			dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */

static struct pci_driver sundance_driver = {
	.name = DRV_NAME,
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = __devexit_p(sundance_remove1),
#ifdef CONFIG_PM
	.suspend = sundance_suspend,
	.resume = sundance_resume,
#endif /* CONFIG_PM */
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);