/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28
29
30 Linux kernel version history:
31
32 LK1.1.0:
33 - Jeff Garzik: softnet 'n stuff
34
35 LK1.1.1:
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
38
39 LK1.1.2:
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
41
42 LK1.1.3:
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
48
49 LK1.1.4:
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
52
53 LK1.1.5:
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
57
58 LK1.1.6:
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
61
62 LK1.1.7:
63 - Manfred Spraul: added reset into tx_timeout
64
65 LK1.1.9:
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
70
71 LK1.1.10:
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
74
75 LK1.1.11:
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
79
80 LK1.1.12:
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
82
83 LK1.1.13 (jgarzik):
84 - Add ethtool support
85 - Replace some MII-related magic numbers with constants
86
87 LK1.1.14 (Ivan G.):
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
99 (Roger Luethi)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
103
104 LK1.1.15 (jgarzik):
105 - Use new MII lib helper generic_mii_ioctl
106
107 LK1.1.16 (Roger Luethi)
108 - Etherleak fix
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
112 - Various clean ups
113
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
120
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
124
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
127
128 LK1.2.0-2.6 (Roger Luethi)
129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
132
133*/
134
#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.2.0-2.6"
#define DRV_RELDATE	"June-10-2004"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;	/* Max events handled per interrupt. */

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   The implicit zero default means received frames are never copied
   (see "Theory of Operation" above). */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
178
179#include <linux/module.h>
180#include <linux/moduleparam.h>
181#include <linux/kernel.h>
182#include <linux/string.h>
183#include <linux/timer.h>
184#include <linux/errno.h>
185#include <linux/ioport.h>
186#include <linux/slab.h>
187#include <linux/interrupt.h>
188#include <linux/pci.h>
189#include <linux/netdevice.h>
190#include <linux/etherdevice.h>
191#include <linux/skbuff.h>
192#include <linux/init.h>
193#include <linux/delay.h>
194#include <linux/mii.h>
195#include <linux/ethtool.h>
196#include <linux/crc32.h>
197#include <linux/bitops.h>
198#include <asm/processor.h> /* Processor type for cache alignment. */
199#include <asm/io.h>
200#include <asm/irq.h>
201#include <asm/uaccess.h>
202
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
/* default: programmed I/O (port) accesses */
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
224
225/*
226 Theory of Operation
227
228I. Board Compatibility
229
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III chips.
232
233II. Board-specific settings
234
235Boards with this chip are functional only in a bus-master PCI slot.
236
237Many operational settings are loaded from the EEPROM to the Config word at
238offset 0x78. For most of these settings, this driver assumes that they are
239correct.
240If this driver is compiled to use PCI memory space operations the EEPROM
241must be configured to enable memory ops.
242
243III. Driver operation
244
245IIIa. Ring buffers
246
247This driver uses two statically allocated fixed-size descriptor lists
248formed into rings by a branch from the final descriptor to the beginning of
249the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
250
251IIIb/c. Transmit/Receive Structure
252
253This driver attempts to use a zero-copy receive and transmit scheme.
254
255Alas, all data buffers are required to start on a 32 bit boundary, so
256the driver must often copy transmit packets into bounce buffers.
257
258The driver allocates full frame size skbuffs for the Rx ring buffers at
259open() time and passes the skb->data field to the chip as receive data
260buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
261a fresh skbuff is allocated and the frame is copied to the new skbuff.
262When the incoming frame is larger, the skbuff is passed directly up the
263protocol stack. Buffers consumed this way are replaced by newly allocated
264skbuffs in the last phase of rhine_rx().
265
266The RX_COPYBREAK value is chosen to trade-off the memory wasted by
267using a full-sized skbuff for small frames vs. the copying costs of larger
268frames. New boards are typically used in generously configured machines
269and the underfilled buffers have negligible impact compared to the benefit of
270a single allocation size, so the default value of zero results in never
271copying packets. When copying is done, the cost is usually mitigated by using
272a combined copy/checksum routine. Copying also preloads the cache, which is
273most useful with small frames.
274
275Since the VIA chips are only able to transfer data to buffers on 32 bit
276boundaries, the IP header at offset 14 in an ethernet frame isn't
277longword aligned for further processing. Copying these unaligned buffers
278has the beneficial effect of 16-byte aligning the IP header.
279
280IIId. Synchronization
281
282The driver runs as two independent, single-threaded flows of control. One
283is the send-packet routine, which enforces single-threaded use by the
284dev->priv->lock spinlock. The other thread is the interrupt handler, which
285is single threaded by the hardware and interrupt handling software.
286
287The send packet thread has partial control over the Tx ring. It locks the
288dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
289is not available it stops the transmit queue by calling netif_stop_queue.
290
291The interrupt handler has exclusive control over the Rx ring and records stats
292from the Tx ring. After reaping the stats, it marks the Tx queue entry as
293empty by incrementing the dirty_tx mark. If at least half of the entries in
294the Rx ring are available the transmit queue is woken up if it was stopped.
295
296IV. Notes
297
298IVb. References
299
300Preliminary VT86C100A manual from http://www.via.com.tw/
301http://www.scyld.com/expert/100mbps.html
302http://www.scyld.com/expert/NWay.html
303ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
304ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
305
306
307IVc. Errata
308
309The VT86C100A manual is not reliable information.
310The 3043 chip does not handle unaligned transmit or receive buffers, resulting
311in significant performance degradation for bounce buffer copies on transmit
312and unaligned IP headers on receive.
313The chip does not pad to minimum transmit length.
314
315*/
316
317
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code: every entry matches on VIA's vendor ID (0x1106)
   and one specific device ID, while any subsystem IDs are accepted.
*/
323
/* Chip revision IDs as read from the PCI revision byte; used in
   rhine_init_one() to pick the family name and quirks. */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

/* Per-chip behavioral differences, stored in rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes: the dummy read forces them to the chip. */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
356
/* PCI IDs this driver claims; all are VIA (vendor 0x1106) parts and
   any subsystem vendor/device combination is accepted. */
static struct pci_device_id rhine_pci_tbl[] =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
366
367
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD (backoff algorithm selection, module parameter) */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same.
   Zero-terminated; walked by the verification loop in rhine_init_one(). */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif
398
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	/* Composite masks combining several of the bits above: */
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	/* Bits 16+ carry IntrStatus2 contents; see get_intr_status(). */
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
421
/* The Rx and Tx buffer descriptors.
   Four little-endian 32 bit words each; layout is fixed by the chip. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

/* Bits in rx_desc.rx_status */
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000	/* descriptor owned by the chip, not the CPU */
};

/* Bits in ChipCmd (Cmd1* variants live in the adjacent ChipCmd1 register). */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
455
/* Per-device private state, allocated together with the net_device. */
struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (allocated only for Rhine-I; see rqRhineI) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;			/* programmed-I/O base (PCI BAR 0) */
	struct net_device_stats stats;
	spinlock_t lock;		/* guards Tx queuing; see IIId above */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;			/* rhine_quirks flags for this chip */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;			/* WOL option bits (set outside this chunk) */

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;		/* mapped register window (PIO or MMIO) */
};
494
/* Forward declarations; the definitions follow below. */
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct device *gdev);
510
/*
 * Busy-wait for 'condition' to become true, for at most 1024 polls.
 * When debugging is enabled and more than half the budget was used,
 * report where the slow wait happened. Gives up silently on timeout.
 */
#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while(0)
519
520static inline u32 get_intr_status(struct net_device *dev)
521{
522 struct rhine_private *rp = netdev_priv(dev);
523 void __iomem *ioaddr = rp->base;
524 u32 intr_status;
525
526 intr_status = ioread16(ioaddr + IntrStatus);
527 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
528 if (rp->quirks & rqStatusWBRace)
529 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
530 return intr_status;
531}
532
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	/* Only chips with WOL support have these power registers. */
	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits (read before clearing below) */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		/* A non-zero saved status means the NIC woke the machine;
		   tell the user why. */
		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}
592
/*
 * Soft-reset the chip via ChipCmd1 and wait for the reset bit to
 * self-clear, escalating to a "force reset" on chips that support it.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;		/* flush the posted write before polling */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}
618
619#ifdef USE_MMIO
620static void enable_mmio(long pioaddr, u32 quirks)
621{
622 int n;
623 if (quirks & rqRhineI) {
624 /* More recent docs say that this bit is reserved ... */
625 n = inb(pioaddr + ConfigA) | 0x20;
626 outb(n, pioaddr + ConfigA);
627 } else {
628 n = inb(pioaddr + ConfigD) | 0x80;
629 outb(n, pioaddr + ConfigD);
630 }
631}
632#endif
633
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 *
 * Uses programmed I/O (pioaddr) for the trigger/poll because the
 * reload overwrites the config registers, including the MMIO enable
 * bit, which is restored afterwards.
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Trigger the autoload and wait for the chip to clear the bit. */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
660
661#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt': run the normal interrupt handler with the
 * device IRQ masked off (used by netpoll/netconsole paths).
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
668#endif
669
/*
 * Bring the hardware to a known state: soft reset, then restore the
 * EEPROM-controlled configuration bytes that the reset clobbered.
 */
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
684
/*
 * PCI probe routine: identify the chip revision and its quirks, map
 * the register window, reset the chip, read the station address, and
 * register the net_device. Returns 0 or a negative errno.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;	/* BAR 1 = memory-mapped register window */
#else
	int bar = 0;	/* BAR 0 = I/O port window */
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	/* Derive family name, quirks, register window size and (for
	   Rhine-III) the fixed PHY address from the revision byte. */
	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, 0xffffffff);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* Station address was reloaded from EEPROM by rhine_hw_init(). */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		/* Take the PHY out of isolate mode. */
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
906
907static int alloc_ring(struct net_device* dev)
908{
909 struct rhine_private *rp = netdev_priv(dev);
910 void *ring;
911 dma_addr_t ring_dma;
912
913 ring = pci_alloc_consistent(rp->pdev,
914 RX_RING_SIZE * sizeof(struct rx_desc) +
915 TX_RING_SIZE * sizeof(struct tx_desc),
916 &ring_dma);
917 if (!ring) {
918 printk(KERN_ERR "Could not allocate DMA memory.\n");
919 return -ENOMEM;
920 }
921 if (rp->quirks & rqRhineI) {
922 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
923 PKT_BUF_SZ * TX_RING_SIZE,
924 &rp->tx_bufs_dma);
925 if (rp->tx_bufs == NULL) {
926 pci_free_consistent(rp->pdev,
927 RX_RING_SIZE * sizeof(struct rx_desc) +
928 TX_RING_SIZE * sizeof(struct tx_desc),
929 ring, ring_dma);
930 return -ENOMEM;
931 }
932 }
933
934 rp->rx_ring = ring;
935 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
936 rp->rx_ring_dma = ring_dma;
937 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
938
939 return 0;
940}
941
942static void free_ring(struct net_device* dev)
943{
944 struct rhine_private *rp = netdev_priv(dev);
945
946 pci_free_consistent(rp->pdev,
947 RX_RING_SIZE * sizeof(struct rx_desc) +
948 TX_RING_SIZE * sizeof(struct tx_desc),
949 rp->rx_ring, rp->rx_ring_dma);
950 rp->tx_ring = NULL;
951
952 if (rp->tx_bufs)
953 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
954 rp->tx_bufs, rp->tx_bufs_dma);
955
956 rp->tx_bufs = NULL;
957
958}
959
/*
 * Chain the Rx descriptors into a ring, then allocate and map one
 * full-sized skbuff per entry and hand each descriptor to the chip.
 * Allocation failures are tolerated; dirty_rx records the shortfall.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	/* Standard MTU fits in PKT_BUF_SZ; larger MTUs get MTU + slack. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		/* Setting DescOwn hands this descriptor to the chip;
		   it must be the last write for this entry. */
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* If allocations failed, this wraps negative (mod 2^32) so the
	   refill logic knows how many buffers are missing. */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
1000
1001static void free_rbufs(struct net_device* dev)
1002{
1003 struct rhine_private *rp = netdev_priv(dev);
1004 int i;
1005
1006 /* Free all the skbuffs in the Rx queue. */
1007 for (i = 0; i < RX_RING_SIZE; i++) {
1008 rp->rx_ring[i].rx_status = 0;
1009 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1010 if (rp->rx_skbuff[i]) {
1011 pci_unmap_single(rp->pdev,
1012 rp->rx_skbuff_dma[i],
1013 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1014 dev_kfree_skb(rp->rx_skbuff[i]);
1015 }
1016 rp->rx_skbuff[i] = NULL;
1017 }
1018}
1019
1020static void alloc_tbufs(struct net_device* dev)
1021{
1022 struct rhine_private *rp = netdev_priv(dev);
1023 dma_addr_t next;
1024 int i;
1025
1026 rp->dirty_tx = rp->cur_tx = 0;
1027 next = rp->tx_ring_dma;
1028 for (i = 0; i < TX_RING_SIZE; i++) {
1029 rp->tx_skbuff[i] = NULL;
1030 rp->tx_ring[i].tx_status = 0;
1031 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1032 next += sizeof(struct tx_desc);
1033 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1034 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1035 }
1036 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1037
1038}
1039
1040static void free_tbufs(struct net_device* dev)
1041{
1042 struct rhine_private *rp = netdev_priv(dev);
1043 int i;
1044
1045 for (i = 0; i < TX_RING_SIZE; i++) {
1046 rp->tx_ring[i].tx_status = 0;
1047 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1048 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1049 if (rp->tx_skbuff[i]) {
1050 if (rp->tx_skbuff_dma[i]) {
1051 pci_unmap_single(rp->pdev,
1052 rp->tx_skbuff_dma[i],
1053 rp->tx_skbuff[i]->len,
1054 PCI_DMA_TODEVICE);
1055 }
1056 dev_kfree_skb(rp->tx_skbuff[i]);
1057 }
1058 rp->tx_skbuff[i] = NULL;
1059 rp->tx_buf[i] = NULL;
1060 }
1061}
1062
1063static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1064{
1065 struct rhine_private *rp = netdev_priv(dev);
1066 void __iomem *ioaddr = rp->base;
1067
1068 mii_check_media(&rp->mii_if, debug, init_media);
1069
1070 if (rp->mii_if.full_duplex)
1071 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1072 ioaddr + ChipCmd1);
1073 else
1074 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1075 ioaddr + ChipCmd1);
1076}
1077
/*
 * Program the chip from scratch: station address, FIFO thresholds,
 * ring base addresses, Rx filter, interrupt mask -- then start the
 * Tx/Rx engines and sync the duplex setting.  Called with the rings
 * already allocated and initialized.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* Program the 6-byte station (MAC) address. */
	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	/* Start both DMA engines; Cmd1NoTxPoll: Tx is kicked on demand. */
	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	/* init_media=1: force an initial link/duplex evaluation. */
	rhine_check_media(dev, 1);
}
1110
1111/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	/* Cancel any MII command in progress, select BMSR as the
	   register to poll, then start the poll (0x80 in MIICmd --
	   datasheet-defined bit, presumably "auto-poll enable"). */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Wait until the chip signals the poll is active (bit 0x20). */
	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	/* 0x40: keep polling BMSR continuously -- TODO confirm vs datasheet. */
	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1122
1123/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	/* Stop the auto-poll machinery before any direct MDIO access. */
	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs a more elaborate shutdown dance than later chips. */
	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		/* Later chips: just wait for the poll-stopped flag (0x80). */
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
1145
1146/* Read and write over the MII Management Data I/O (MDIO) interface. */
1147
/*
 * Read one PHY register over MDIO.  Link auto-polling must be paused
 * around the access and re-enabled afterwards.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* The chip clears the trigger bit when the transaction completes. */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
1166
/*
 * Write one PHY register over MDIO.  Mirrors mdio_read(): pause the
 * link monitor, run the transaction, restore the monitor.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	/* Wait for the chip to clear the trigger bit (write done). */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
1183
/*
 * net_device open():  grab the (shared) IRQ, build the descriptor
 * rings and buffers, reset and program the chip, then start the
 * transmit queue.  Returns 0 or a negative errno.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		/* Undo the IRQ registration on allocation failure. */
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	/* Reset and fully reprogram the chip before accepting traffic. */
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1218
/*
 * Watchdog handler: a transmit has hung.  Rebuild both rings from
 * scratch, reset and reprogram the chip, and restart the queue.
 * IRQ is disabled (not just masked) so the handler cannot run while
 * the rings are being torn down.
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}
1251
/*
 * hard_start_xmit: queue one skb on the Tx ring.  Rhine-I cannot DMA
 * from unaligned/fragmented/checksum-offloaded skbs, so those are
 * copied into a per-slot bounce buffer instead of being mapped.
 * Ownership (DescOwn) is handed to the chip only after all other
 * descriptor fields are written, fenced by wmb().
 */
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* Pad runt frames up to the Ethernet minimum. */
	if (skb->len < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
	}

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		/* Zero DMA handle = bounce buffer; free_tbufs/rhine_tx
		   use this to skip pci_unmap_single(). */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	/* Hand the descriptor to the chip -- must be the last field set. */
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	/* Stop the queue when the ring is full; rhine_tx() reopens it. */
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
1325
1326/* The interrupt handler does all of the Rx thread work and cleans up
1327 after the Tx thread. */
1328static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1329{
1330 struct net_device *dev = dev_instance;
1331 struct rhine_private *rp = netdev_priv(dev);
1332 void __iomem *ioaddr = rp->base;
1333 u32 intr_status;
1334 int boguscnt = max_interrupt_work;
1335 int handled = 0;
1336
1337 while ((intr_status = get_intr_status(dev))) {
1338 handled = 1;
1339
1340 /* Acknowledge all of the current interrupt sources ASAP. */
1341 if (intr_status & IntrTxDescRace)
1342 iowrite8(0x08, ioaddr + IntrStatus2);
1343 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1344 IOSYNC;
1345
1346 if (debug > 4)
1347 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1348 dev->name, intr_status);
1349
1350 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1351 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1352 rhine_rx(dev);
1353
1354 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1355 if (intr_status & IntrTxErrSummary) {
1356 /* Avoid scavenging before Tx engine turned off */
1357 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1358 if (debug > 2 &&
1359 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1360 printk(KERN_WARNING "%s: "
1361 "rhine_interrupt() Tx engine"
1362 "still on.\n", dev->name);
1363 }
1364 rhine_tx(dev);
1365 }
1366
1367 /* Abnormal error summary/uncommon events handlers. */
1368 if (intr_status & (IntrPCIErr | IntrLinkChange |
1369 IntrStatsMax | IntrTxError | IntrTxAborted |
1370 IntrTxUnderrun | IntrTxDescRace))
1371 rhine_error(dev, intr_status);
1372
1373 if (--boguscnt < 0) {
1374 printk(KERN_WARNING "%s: Too much work at interrupt, "
1375 "status=%#8.8x.\n",
1376 dev->name, intr_status);
1377 break;
1378 }
1379 }
1380
1381 if (debug > 3)
1382 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1383 dev->name, ioread16(ioaddr + IntrStatus));
1384 return IRQ_RETVAL(handled);
1385}
1386
1387/* This routine is logically part of the interrupt handler, but isolated
1388 for clarity. */
/*
 * Scavenge completed Tx descriptors: account errors/collisions,
 * unmap and free the skbs, and reopen the queue once enough ring
 * slots are free.  Runs under rp->lock from interrupt context.
 */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		/* Still owned by the chip: nothing more to collect. */
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			/* Decode individual error bits into stats. */
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				/* Give the descriptor back for a retry. */
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Success: collision count location differs per chip. */
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. (DMA handle 0 = bounce buffer.) */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	/* Reopen the queue when there is comfortable headroom again. */
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
1449
1450/* This routine is logically part of the interrupt handler, but isolated
1451 for clarity and better register allocation. */
/*
 * Receive path: walk descriptors the chip has handed back, pass good
 * frames up the stack (copying small ones per rx_copybreak), account
 * errors, then refill any ring slots that lost their skb.
 */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		/* Frame length lives in the upper 16 bits of the status. */
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* Frame did not fit in one buffer. */
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				if (debug > 2)
					printk(KERN_DEBUG " rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				/* Decode individual Rx error bits. */
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh skb and keep
				   the ring buffer mapped for reuse. */
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame: detach the ring skb and pass it
				   up; the refill loop replaces it below. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->tail,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		/* Return ownership of the slot to the chip. */
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}
}
1571
1572/*
1573 * Clears the "tally counters" for CRC errors and missed frames(?).
1574 * It has been reported that some chips need a write of 0 to clear
1575 * these, for others the counters are set to 1 when written to and
1576 * instead cleared when read. So we clear them both ways ...
1577 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	/* Clear by writing zero... */
	iowrite32(0, ioaddr + RxMissed);
	/* ...and by reading (read-to-clear on some chips); results discarded. */
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
1584
1585static void rhine_restart_tx(struct net_device *dev) {
1586 struct rhine_private *rp = netdev_priv(dev);
1587 void __iomem *ioaddr = rp->base;
1588 int entry = rp->dirty_tx % TX_RING_SIZE;
1589 u32 intr_status;
1590
1591 /*
1592 * If new errors occured, we need to sort them out before doing Tx.
1593 * In that case the ISR will be back here RSN anyway.
1594 */
1595 intr_status = get_intr_status(dev);
1596
1597 if ((intr_status & IntrTxErrSummary) == 0) {
1598
1599 /* We know better than the chip where it should continue. */
1600 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1601 ioaddr + TxRingPtr);
1602
1603 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1604 ioaddr + ChipCmd);
1605 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1606 ioaddr + ChipCmd1);
1607 IOSYNC;
1608 }
1609 else {
1610 /* This should never happen */
1611 if (debug > 1)
1612 printk(KERN_WARNING "%s: rhine_restart_tx() "
1613 "Another error occured %8.8x.\n",
1614 dev->name, intr_status);
1615 }
1616
1617}
1618
/*
 * Handle the "abnormal" interrupt sources: link changes, statistics
 * overflow, and the various Tx failure modes.  Tx underrun/error
 * raises the FIFO threshold and restarts the transmitter.  Runs
 * under rp->lock from the ISR.
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Hardware tally counters near overflow: fold into stats. */
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Raise the Tx FIFO threshold (capped at 0xE0). */
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	/* Generic Tx error with no more specific bit set. */
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Anything left over is unexpected -- log it. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}
1676
/*
 * net_device get_stats(): fold the hardware tally counters into the
 * software statistics (under the lock, since the ISR also updates
 * them) and return the accumulated stats structure.
 */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}
1691
/*
 * Program the Rx filter: promiscuous, all-multicast, or a 64-bit
 * CRC-based multicast hash built from the device's multicast list.
 * Also (re)writes the Rx FIFO threshold, which shares RxConfig.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 bits of the Ethernet CRC select the hash bit. */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
1728
1729static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1730{
1731 struct rhine_private *rp = netdev_priv(dev);
1732
1733 strcpy(info->driver, DRV_NAME);
1734 strcpy(info->version, DRV_VERSION);
1735 strcpy(info->bus_info, pci_name(rp->pdev));
1736}
1737
1738static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1739{
1740 struct rhine_private *rp = netdev_priv(dev);
1741 int rc;
1742
1743 spin_lock_irq(&rp->lock);
1744 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1745 spin_unlock_irq(&rp->lock);
1746
1747 return rc;
1748}
1749
1750static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1751{
1752 struct rhine_private *rp = netdev_priv(dev);
1753 int rc;
1754
1755 spin_lock_irq(&rp->lock);
1756 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1757 spin_unlock_irq(&rp->lock);
1758
1759 return rc;
1760}
1761
/* ethtool nway_reset: restart PHY autonegotiation via the MII layer. */
static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}
1768
/* ethtool get_link: report link state as seen by the MII layer. */
static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}
1775
/* ethtool msglevel: the driver-wide 'debug' parameter doubles as the
 * message level for all rhine devices. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1780
/* ethtool set_msglevel: updates the shared 'debug' level (affects
 * every rhine device, not just this one). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1785
/* ethtool get_wol: report supported and currently enabled wake-up
 * options.  Silently leaves *wol untouched on chips without WOL. */
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}
1799
1800static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1801{
1802 struct rhine_private *rp = netdev_priv(dev);
1803 u32 support = WAKE_PHY | WAKE_MAGIC |
1804 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1805
1806 if (!(rp->quirks & rqWOL))
1807 return -EINVAL;
1808
1809 if (wol->wolopts & ~support)
1810 return -EINVAL;
1811
1812 spin_lock_irq(&rp->lock);
1813 rp->wolopts = wol->wolopts;
1814 spin_unlock_irq(&rp->lock);
1815
1816 return 0;
1817}
1818
/* ethtool operations table wired up at probe time. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
};
1832
1833static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1834{
1835 struct rhine_private *rp = netdev_priv(dev);
1836 int rc;
1837
1838 if (!netif_running(dev))
1839 return -EINVAL;
1840
1841 spin_lock_irq(&rp->lock);
1842 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1843 spin_unlock_irq(&rp->lock);
1844
1845 return rc;
1846}
1847
/*
 * net_device stop(): quiesce the chip (loopback, interrupts masked,
 * DMA engines stopped) under the lock, then release the IRQ, the
 * buffers and the descriptor rings.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	/* With the chip quiet, tear down IRQ, buffers, then the rings. */
	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
1880
1881
/*
 * PCI remove(): unregister from the network layer first (stops new
 * activity), then release MMIO mapping, regions and the netdev.
 */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1896
/*
 * Driver-model shutdown hook (also called from rhine_suspend):
 * arm the configured Wake-on-LAN events and drop the chip into the
 * D3/sleep power state.  No-op on chips without the WOL quirk.
 */
static void rhine_shutdown (struct device *gendev)
{
	struct pci_dev *pdev = to_pci_dev(gendev);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}
1943
1944#ifdef CONFIG_PM
/*
 * PCI suspend: detach the netdev, save PCI state, and reuse
 * rhine_shutdown() to arm WOL / enter D3; finally release the IRQ.
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(&pdev->dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	/* NOTE(review): uses dev->irq here but rp->pdev->irq elsewhere --
	   presumably identical for PCI devices; verify. */
	free_irq(dev->irq, dev);
	return 0;
}
1964
/*
 * PCI resume: re-acquire the IRQ, restore power state and PCI config,
 * rebuild both rings from scratch and reprogram the chip, then
 * reattach the netdev.
 */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Failure is logged but not fatal: resume continues regardless. */
        if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
			dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Ring contents did not survive suspend: rebuild everything. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
2001#endif /* CONFIG_PM */
2002
/* PCI driver glue; .shutdown hangs off the generic driver model
   structure in this kernel version. */
static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.driver = {
		.shutdown = rhine_shutdown,
	}
};
2016
2017
/* Module entry point: announce ourselves and register with PCI core. */
static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}
2026
2027
/* Module exit point: unregister the PCI driver (triggers remove()). */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2032
2033
/* Register module load/unload entry points. */
module_init(rhine_init);
module_exit(rhine_cleanup);