/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.0"
#define DRV_RELDATE	"June-27-2006"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16
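
/*
 * Minimal illustrative sketch (not used by the driver): because the ring
 * sizes above are powers of two, the "cur % RING_SIZE" index arithmetic
 * used throughout this file compiles down to a cheap bitwise AND.
 */
#if 0	/* example only */
static unsigned int example_ring_index(unsigned int cur)
{
	return cur % TX_RING_SIZE;	/* same as: cur & (TX_RING_SIZE - 1) */
}
#endif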


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");

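/*
 * Typical module usage (illustrative only; parameter names as declared
 * above):
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 max_interrupt_work=20
 */
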
/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
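
/*
 * Illustrative sketch only (assumes the rhine_private fields declared
 * further below): the two Tx indices from section IIId form a classic
 * producer/consumer pair. rhine_start_tx() advances cur_tx, rhine_tx()
 * advances dirty_tx, and their difference is the number of in-flight
 * descriptors, bounded by TX_QUEUE_LEN.
 */
#if 0	/* example only */
static unsigned int example_tx_in_flight(const struct rhine_private *rp)
{
	return rp->cur_tx - rp->dirty_tx;	/* queue stops at TX_QUEUE_LEN */
}
#endif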


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
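
/*
 * A read from any chip register forces PCI bridges to flush writes that
 * are still posted (buffered) on their way to the device; StationAddr is
 * merely a convenient register that is safe to read with no side effects.
 */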

static struct pci_device_id rhine_pci_tbl[] =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
#ifdef USE_MMIO
/* Registers for which we verify that MMIO and PIO reads agree. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

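/* Bounded busy-wait: gives up after 1024 polls of the condition and, at
   debug > 1, reports when more than half of that budget was consumed. */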
#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while (0)

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power-related registers into a sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO does not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let the MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
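
/*
 * Note on the two Tx paths above: Rhine-I parts cannot DMA from unaligned,
 * fragmented or checksum-needing buffers, so such packets are first copied
 * (and checksummed) into the pre-allocated tx_buf bounce buffer; all other
 * packets are mapped in place with pci_map_single().
 */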

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			rhine_rx(dev);

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}

}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
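			/* The top 6 bits of the address CRC index the
			   64-entry multicast hash table. */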
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return;	/* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};


static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);