/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.1"
#define DRV_RELDATE	"July-24-2006"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE	64
#else
#define RX_RING_SIZE	16
#endif
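
/*
 * Illustrative sketch (not part of the original driver): because the ring
 * sizes above are powers of two, the index arithmetic used throughout,
 * e.g.
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * lets the compiler reduce the '%' to a mask, i.e. cur_tx & (16 - 1).
 */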


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
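
/*
 * Example usage when loaded as a module (illustrative; standard modprobe
 * syntax with the parameter names declared above):
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 max_interrupt_work=32
 */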

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
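
As an illustrative sketch (mirroring what alloc_rbufs()/alloc_tbufs() below
actually set up, not additional code), the wrap-around is simply the last
next_desc pointer aimed back at the ring base:

	for (i = 0; i < RX_RING_SIZE; i++)
		rx_ring[i].next_desc = ring_dma +
			((i + 1) % RX_RING_SIZE) * sizeof(struct rx_desc);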

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.
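
As a sketch of the receive-side decision this describes (condensed from
rhine_rx() below):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	(copy path: aligns the IP header)
		(copy the frame; the ring skbuff stays mapped in place)
	} else {
		(hand the ring skbuff itself to the stack; refill the slot later)
	}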

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
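
A minimal sketch of the transmit-side consequence (mirroring the test in
rhine_start_tx() below):

	if ((rp->quirks & rqRhineI) && ((unsigned long)skb->data & 3))
		(copy into the 4-byte-aligned tx_buf[] bounce buffer)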

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the
ring is not available it stops the transmit queue by calling
netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records
stats from the Tx ring. After reaping the stats, it marks the Tx queue entry
as empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
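
In outline, the two lock sites look like this (a sketch of the existing
code, not new locking):

	rhine_start_tx():	spin_lock_irq(&rp->lock);  queue one skb;  unlock
	rhine_tx() (in IRQ):	spin_lock(&rp->lock);  reap ring, wake queue;  unlock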

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check to ensure MMIO and PIO accesses read the same value. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length; /* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length; /* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while(0)

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct net_device *dev, int *budget)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int done, limit = min(dev->quota, *budget);

	done = rhine_rx(dev, limit);
	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		netif_rx_complete(dev);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
		return 0;
	}
	else
		return 1;
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	dev->poll = rhine_napipoll;
	dev->weight = 64;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
	netif_poll_enable(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			netif_rx_schedule(dev);
#else
			rhine_rx(dev, RX_RING_SIZE);
#endif
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
1586 dev->name, intr_status);
1587 }
1588
1589}
1590
1591static void rhine_error(struct net_device *dev, int intr_status)
1592{
1593 struct rhine_private *rp = netdev_priv(dev);
1594 void __iomem *ioaddr = rp->base;
1595
1596 spin_lock(&rp->lock);
1597
1598 if (intr_status & IntrLinkChange)
John W. Linville38bb6b22006-05-19 10:51:21 -04001599 rhine_check_media(dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 if (intr_status & IntrStatsMax) {
1601 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1602 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1603 clear_tally_counters(ioaddr);
1604 }
1605 if (intr_status & IntrTxAborted) {
1606 if (debug > 1)
1607 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1608 dev->name, intr_status);
1609 }
1610 if (intr_status & IntrTxUnderrun) {
1611 if (rp->tx_thresh < 0xE0)
1612 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1613 if (debug > 1)
1614 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1615 "threshold now %2.2x.\n",
1616 dev->name, rp->tx_thresh);
1617 }
1618 if (intr_status & IntrTxDescRace) {
1619 if (debug > 2)
1620 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1621 dev->name);
1622 }
1623 if ((intr_status & IntrTxError) &&
1624 (intr_status & (IntrTxAborted |
1625 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1626 if (rp->tx_thresh < 0xE0) {
1627 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1628 }
1629 if (debug > 1)
1630 printk(KERN_INFO "%s: Unspecified error. Tx "
1631 "threshold now %2.2x.\n",
1632 dev->name, rp->tx_thresh);
1633 }
1634 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1635 IntrTxError))
1636 rhine_restart_tx(dev);
1637
1638 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1639 IntrTxError | IntrTxAborted | IntrNormalSummary |
1640 IntrTxDescRace)) {
1641 if (debug > 1)
1642 printk(KERN_ERR "%s: Something Wicked happened! "
1643 "%8.8x.\n", dev->name, intr_status);
1644 }
1645
1646 spin_unlock(&rp->lock);
1647}
1648
1649static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1650{
1651 struct rhine_private *rp = netdev_priv(dev);
1652 void __iomem *ioaddr = rp->base;
1653 unsigned long flags;
1654
1655 spin_lock_irqsave(&rp->lock, flags);
1656 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1657 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1658 clear_tally_counters(ioaddr);
1659 spin_unlock_irqrestore(&rp->lock, flags);
1660
1661 return &rp->stats;
1662}
1663
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}

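/* ethtool support. Most operations simply wrap the generic MII library
   around rp->mii_if. */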
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}

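/* Get/set link settings (speed, duplex, autonegotiation). Setting them
   also refreshes the carrier state. */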
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

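/* Autonegotiation restart and link-state queries go straight to the
   mii library; the message level maps onto the module's 'debug' knob. */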
static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

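/* Report Wake-on-LAN capabilities and the current configuration. Only
   chips with the rqWOL quirk support wake-up events. */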
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

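/* Validate and store a new Wake-on-LAN configuration; rhine_shutdown()
   arms the hardware accordingly. */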
static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

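/* Entry points the kernel invokes via dev->ethtool_ops. */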
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_wol = rhine_get_wol,
	.set_wol = rhine_set_wol,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

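/* MII ioctls, serviced by the generic MII layer; the carrier state is
   refreshed afterwards in case the PHY settings changed. */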
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

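/* Take the interface down: stop the queue and polling, disable
   interrupts, halt the chip, then free the IRQ and descriptor rings. */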
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);
	netif_poll_disable(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


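/* PCI removal: unregister the netdev, then release MMIO mappings and
   PCI resources. */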
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

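/* Arm Wake-on-LAN as configured and drop the chip into power state D3
   when the system shuts down. */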
static void rhine_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}

#ifdef CONFIG_PM
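/* Suspend: detach the interface and reuse the shutdown path to arm
   WOL before the system sleeps. */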
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

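/* Resume: reacquire the IRQ, return the chip to D0, rebuild both
   descriptor rings and reprogram the chip registers. */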
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

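/* Hook the driver into the PCI core, with suspend/resume only when
   power management is configured in. */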
static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};


static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);