/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html


	Linux kernel version history:

	LK1.1.0:
	- Jeff Garzik: softnet 'n stuff

	LK1.1.1:
	- Justin Guyett: softnet and locking fixes
	- Jeff Garzik: use PCI interface

	LK1.1.2:
	- Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions

	LK1.1.3:
	- Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
			 code) update "Theory of Operation" with
			 softnet/locking changes
	- Dave Miller: PCI DMA and endian fixups
	- Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation

	LK1.1.4:
	- Urban Widmark: fix gcc 2.95.2 problem and
			 remove writel's to fixed address 0x7c

	LK1.1.5:
	- Urban Widmark: mdio locking, bounce buffer changes
			 merges from Becker's 1.05 version
			 added netif_running_on/off support

	LK1.1.6:
	- Urban Widmark: merges from Becker's 1.08b version (VT6102 + mdio)
			 set netif_running_on/off on startup, del_timer_sync

	LK1.1.7:
	- Manfred Spraul: added reset into tx_timeout

	LK1.1.9:
	- Urban Widmark: merges from Becker's 1.10 version
			 (media selection + eeprom reload)
	- David Vrabel: merges from D-Link "1.11" version
			(disable WOL and PME on startup)

	LK1.1.10:
	- Manfred Spraul: use "singlecopy" for unaligned buffers
			  don't allocate bounce buffers for !ReqTxAlign cards

	LK1.1.11:
	- David Woodhouse: Set dev->base_addr before the first time we call
			   wait_for_reset(). It's a lot happier that way.
			   Free np->tx_bufs only if we actually allocated it.

	LK1.1.12:
	- Martin Eriksson: Allow Memory-Mapped IO to be enabled.

	LK1.1.13 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

	LK1.1.14 (Ivan G.):
	- fixes comments for Rhine-III
	- removes W_MAX_TIMEOUT (unused)
	- adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
	  is R-I and has Davicom chip, flag is referenced in kernel driver)
	- sends chip_id as a parameter to wait_for_reset since np is not
	  initialized on first call
	- changes mmio "else if (chip_id==VT6102)" to "else" so it will work
	  for Rhine-III's (documentation says same bit is correct)
	- transmit frame queue message is off by one - fixed
	- adds IntrNormalSummary to "Something Wicked" exclusion list
	  so normal interrupts will not trigger the message (src: Donald Becker)
	(Roger Luethi)
	- show confused chip where to continue after Tx error
	- location of collision counter is chip specific
	- allow selecting backoff algorithm (module parameter)

	LK1.1.15 (jgarzik):
	- Use new MII lib helper generic_mii_ioctl

	LK1.1.16 (Roger Luethi)
	- Etherleak fix
	- Handle Tx buffer underrun
	- Fix bugs in full duplex handling
	- New reset code uses "force reset" cmd on Rhine-II
	- Various clean ups

	LK1.1.17 (Roger Luethi)
	- Fix race in via_rhine_start_tx()
	- On errors, wait for Tx engine to turn off before scavenging
	- Handle Tx descriptor write-back race on Rhine-II
	- Force flushing for PCI posted writes
	- More reset code changes

	LK1.1.18 (Roger Luethi)
	- No filtering multicast in promisc mode (Edward Peng)
	- Fix for Rhine-I Tx timeouts

	LK1.1.19 (Roger Luethi)
	- Increase Tx threshold for unspecified errors

	LK1.2.0-2.6 (Roger Luethi)
	- Massive clean-up
	- Rewrite PHY, media handling (remove options, full_duplex, backoff)
	- Fix Tx engine race for good

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.2.0-2.6"
#define DRV_RELDATE	"June-10-2004"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
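
/*
 * Example (illustrative only; the values are arbitrary): all three knobs
 * above are ordinary module parameters and can be set at load time, e.g.
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 max_interrupt_work=40
 */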

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III chips.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
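
/*
 * Illustrative sketch (not part of the driver logic): the "branch from the
 * final descriptor to the beginning of the list" described in IIIa amounts
 * to chaining the descriptors by bus address, modulo the ring size. For Rx:
 *
 *	for (i = 0; i < RX_RING_SIZE; i++)
 *		rx_ring[i].next_desc =
 *			cpu_to_le32(rx_ring_dma +
 *				    ((i + 1) % RX_RING_SIZE) *
 *				    sizeof(struct rx_desc));
 *
 * alloc_rbufs() and alloc_tbufs() below implement exactly this, with the
 * wrap handled as a fixup of the last entry after the loop.
 */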


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
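
/*
 * Typical use, as in rhine_chip_reset() below: a dummy read forces the
 * preceding posted write out to the chip before we test its effect.
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */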

static struct pci_device_id rhine_pci_tbl[] =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO accesses agree. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};
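
/*
 * The summary masks above are just ORs of the individual bits:
 * IntrNormalSummary = IntrRxDone | IntrTxDone (0x0001 | 0x0002 = 0x0003);
 * IntrTxErrSummary = IntrTxError | IntrTxUnderrun | IntrTxAborted |
 * IntrTxDescRace (0x0008 | 0x0210 | 0x2000 | 0x080000 = 0x082218).
 */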

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};
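
/*
 * Ownership handshake in a nutshell: the driver fills in a descriptor and
 * hands it to the NIC by setting DescOwn; the NIC clears DescOwn when it is
 * done with the descriptor. rhine_tx() and rhine_rx() therefore stop
 * scavenging as soon as they meet a descriptor the chip still owns, e.g.
 *
 *	if (le32_to_cpu(rp->tx_ring[entry].tx_status) & DescOwn)
 *		break;
 */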

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024 - i, __func__, __LINE__);		\
} while (0)
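
/*
 * Usage example, taken from rhine_chip_reset() below: spin until the reset
 * bit clears, with a diagnostic if it took more than half the budget.
 *
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */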

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}
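
/*
 * Note how the "<< 16" above maps bit 3 (0x08) of IntrStatus2 onto
 * IntrTxDescRace (0x080000) in the combined status word, which is also why
 * rhine_interrupt() acknowledges that bit by writing 0x08 back to
 * IntrStatus2 rather than to IntrStatus.
 */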

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO does not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
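
/*
 * Layout produced by alloc_ring(): a single DMA-coherent allocation holds
 * both rings, Rx descriptors first, Tx descriptors immediately after:
 *
 *	ring_dma: [ rx_desc[0] ... rx_desc[RX_RING_SIZE-1] ]
 *		  [ tx_desc[0] ... tx_desc[TX_RING_SIZE-1] ]
 *
 * which is why tx_ring and tx_ring_dma are both derived by the same offset.
 */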

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* Zero when every buffer was allocated; "negative" (as int) when some
	   allocations failed, so that the refill loop in rhine_rx() tops the
	   ring back up. */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;	/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
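
/*
 * Illustrative use of the accessors above, mirroring rhine_init_one():
 * the link state can be read from the MII basic mode status register.
 *
 *	if (mdio_read(dev, phy_id, MII_BMSR) & BMSR_LSTATUS)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */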

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb->len < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
	}

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
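
/*
 * Why the barriers in rhine_start_tx() matter: the chip may fetch a
 * descriptor as soon as it sees DescOwn, so the descriptor body is written
 * first, a wmb() orders those stores, and only then does tx_status receive
 * DescOwn; a second wmb() makes the handoff visible before the Tx demand
 * poke that follows it.
 */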

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			rhine_rx(dev);

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}

}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
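
/*
 * Worked example for the hash filter in rhine_set_rx_mode(): ether_crc()
 * yields a 32-bit CRC of the multicast address, and its top six bits
 * (crc >> 26, range 0..63) pick one of the 64 hash bits, split across the
 * two 32-bit filter registers: word = bit_nr >> 5 (0 or 1, i.e.
 * MulticastFilter0/1), bit = bit_nr & 31.
 */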
1730
1731static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1732{
1733 struct rhine_private *rp = netdev_priv(dev);
1734
1735 strcpy(info->driver, DRV_NAME);
1736 strcpy(info->version, DRV_VERSION);
1737 strcpy(info->bus_info, pci_name(rp->pdev));
1738}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

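/* WOL controls are exposed only on chips with the rqWOL quirk; earlier
   Rhine chips have no wake-up engine. */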
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

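/* ethtool entry points. Link state, autonegotiation and media settings
   are delegated to the generic MII library through rp->mii_if. */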
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_wol = rhine_get_wol,
	.set_wol = rhine_set_wol,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

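/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are handled by the
   generic MII layer. */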
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);
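	/* With interrupts masked and the chip stopped, the IRQ and all
	   descriptor rings and buffers can be released safely. */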

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

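	/* Arm the selected wake-up sources, then leave the chip in a
	   low-power state from which they can wake the system. */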
	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

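	/* Reuse the shutdown path: it arms WOL and puts the chip to sleep. */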
	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
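	/* Rebuild the Tx/Rx descriptor rings from scratch; their contents
	   are stale after the power transition. */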
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};


static int __init rhine_init(void)
{
/* When built as a module, this is printed whether or not devices are found in probe. */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);