blob: 7a5899059c445703606e41912b740d180eb6a784 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
32#define DRV_NAME "via-rhine"
Roger Luethie84df482007-03-06 19:57:37 +010033#define DRV_VERSION "1.4.3"
34#define DRV_RELDATE "2007-03-06"
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
36
37/* A few user-configurable values.
38 These may be modified when a driver module is loaded. */
39
40static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
41static int max_interrupt_work = 20;
42
43/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44 Setting to > 1518 effectively disables this feature. */
Dustin Marquessb47157f2007-08-10 14:05:15 -070045#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
46 || defined(CONFIG_SPARC) || defined(__ia64__) \
47 || defined(__sh__) || defined(__mips__)
48static int rx_copybreak = 1518;
49#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070050static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070051#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Roger Luethib933b4d2006-08-14 23:00:21 -070053/* Work-around for broken BIOSes: they are unable to get the chip back out of
54 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
55static int avoid_D3;
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057/*
58 * In case you are looking for 'options[]' or 'full_duplex[]', they
59 * are gone. Use ethtool(8) instead.
60 */
61
62/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 The Rhine has a 64 element 8390-like hash table. */
64static const int multicast_filter_limit = 32;
65
66
67/* Operational parameters that are set at compile time. */
68
69/* Keep the ring sizes a power of two for compile efficiency.
70 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
71 Making the Tx ring too large decreases the effectiveness of channel
72 bonding and packet priority.
73 There are no ill effects from too-large receive rings. */
74#define TX_RING_SIZE 16
75#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070076#ifdef CONFIG_VIA_RHINE_NAPI
77#define RX_RING_SIZE 64
78#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070079#define RX_RING_SIZE 16
Roger Luethi633949a2006-08-14 23:00:17 -070080#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87
88#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89
90#include <linux/module.h>
91#include <linux/moduleparam.h>
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/timer.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
97#include <linux/slab.h>
98#include <linux/interrupt.h>
99#include <linux/pci.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -0400100#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101#include <linux/netdevice.h>
102#include <linux/etherdevice.h>
103#include <linux/skbuff.h>
104#include <linux/init.h>
105#include <linux/delay.h>
106#include <linux/mii.h>
107#include <linux/ethtool.h>
108#include <linux/crc32.h>
109#include <linux/bitops.h>
110#include <asm/processor.h> /* Processor type for cache alignment. */
111#include <asm/io.h>
112#include <asm/irq.h>
113#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100114#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
116/* These identify the driver base version and may not be removed. */
117static char version[] __devinitdata =
118KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
119
120/* This driver was written to use PCI memory space. Some early versions
121 of the Rhine may only work correctly with I/O space accesses. */
122#ifdef CONFIG_VIA_RHINE_MMIO
123#define USE_MMIO
124#else
125#endif
126
127MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129MODULE_LICENSE("GPL");
130
131module_param(max_interrupt_work, int, 0);
132module_param(debug, int, 0);
133module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700134module_param(avoid_D3, bool, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700138MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
140/*
141 Theory of Operation
142
143I. Board Compatibility
144
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III family chips.
147
148II. Board-specific settings
149
150Boards with this chip are functional only in a bus-master PCI slot.
151
152Many operational settings are loaded from the EEPROM to the Config word at
153offset 0x78. For most of these settings, this driver assumes that they are
154correct.
155If this driver is compiled to use PCI memory space operations the EEPROM
156must be configured to enable memory ops.
157
158III. Driver operation
159
160IIIa. Ring buffers
161
162This driver uses two statically allocated fixed-size descriptor lists
163formed into rings by a branch from the final descriptor to the beginning of
164the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
165
166IIIb/c. Transmit/Receive Structure
167
168This driver attempts to use a zero-copy receive and transmit scheme.
169
170Alas, all data buffers are required to start on a 32 bit boundary, so
171the driver must often copy transmit packets into bounce buffers.
172
173The driver allocates full frame size skbuffs for the Rx ring buffers at
174open() time and passes the skb->data field to the chip as receive data
175buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176a fresh skbuff is allocated and the frame is copied to the new skbuff.
177When the incoming frame is larger, the skbuff is passed directly up the
178protocol stack. Buffers consumed this way are replaced by newly allocated
179skbuffs in the last phase of rhine_rx().
180
181The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182using a full-sized skbuff for small frames vs. the copying costs of larger
183frames. New boards are typically used in generously configured machines
184and the underfilled buffers have negligible impact compared to the benefit of
185a single allocation size, so the default value of zero results in never
186copying packets. When copying is done, the cost is usually mitigated by using
187a combined copy/checksum routine. Copying also preloads the cache, which is
188most useful with small frames.
189
190Since the VIA chips are only able to transfer data to buffers on 32 bit
191boundaries, the IP header at offset 14 in an ethernet frame isn't
192longword aligned for further processing. Copying these unaligned buffers
193has the beneficial effect of 16-byte aligning the IP header.
194
195IIId. Synchronization
196
197The driver runs as two independent, single-threaded flows of control. One
198is the send-packet routine, which enforces single-threaded use by the
199dev->priv->lock spinlock. The other thread is the interrupt handler, which
200is single threaded by the hardware and interrupt handling software.
201
202The send packet thread has partial control over the Tx ring. It locks the
203dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
204is not available it stops the transmit queue by calling netif_stop_queue.
205
206The interrupt handler has exclusive control over the Rx ring and records stats
207from the Tx ring. After reaping the stats, it marks the Tx queue entry as
208empty by incrementing the dirty_tx mark. If at least half of the entries in
209the Rx ring are available the transmit queue is woken up if it was stopped.
210
211IV. Notes
212
213IVb. References
214
215Preliminary VT86C100A manual from http://www.via.com.tw/
216http://www.scyld.com/expert/100mbps.html
217http://www.scyld.com/expert/NWay.html
218ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
220
221
222IVc. Errata
223
224The VT86C100A manual is not reliable information.
225The 3043 chip does not handle unaligned transmit or receive buffers, resulting
226in significant performance degradation for bounce buffer copies on transmit
227and unaligned IP headers on receive.
228The chip does not pad to minimum transmit length.
229
230*/
231
232
233/* This table drives the PCI probe routines. It's mostly boilerplate in all
234 of the drivers, and will likely be provided by some future kernel.
 Note the matching code -- the first table entry matches all 56** cards but
236 second only the 1234 card.
237*/
238
239enum rhine_revs {
240 VT86C100A = 0x00,
241 VTunknown0 = 0x20,
242 VT6102 = 0x40,
243 VT8231 = 0x50, /* Integrated MAC */
244 VT8233 = 0x60, /* Integrated MAC */
245 VT8235 = 0x74, /* Integrated MAC */
246 VT8237 = 0x78, /* Integrated MAC */
247 VTunknown1 = 0x7C,
248 VT6105 = 0x80,
249 VT6105_B0 = 0x83,
250 VT6105L = 0x8A,
251 VT6107 = 0x8C,
252 VTunknown2 = 0x8E,
253 VT6105M = 0x90, /* Management adapter */
254};
255
256enum rhine_quirks {
257 rqWOL = 0x0001, /* Wake-On-LAN support */
258 rqForceReset = 0x0002,
259 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
260 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
261 rqRhineI = 0x0100, /* See comment below */
262};
263/*
264 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
265 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
267 */
268
269/* Beware of PCI posted writes */
270#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
271
Jeff Garzik46009c82006-06-27 09:12:38 -0400272static const struct pci_device_id rhine_pci_tbl[] = {
273 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
274 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
275 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
276 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277 { } /* terminate list */
278};
279MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
280
281
282/* Offsets to the device registers. */
283enum register_offsets {
284 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
285 ChipCmd1=0x09,
286 IntrStatus=0x0C, IntrEnable=0x0E,
287 MulticastFilter0=0x10, MulticastFilter1=0x14,
288 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
289 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
290 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
291 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
292 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
293 StickyHW=0x83, IntrStatus2=0x84,
294 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
295 WOLcrClr1=0xA6, WOLcgClr=0xA7,
296 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
297};
298
299/* Bits in ConfigD */
300enum backoff_bits {
301 BackOptional=0x01, BackModify=0x02,
302 BackCaptureEffect=0x04, BackRandom=0x08
303};
304
305#ifdef USE_MMIO
306/* Registers we check that mmio and reg are the same. */
307static const int mmio_verify_registers[] = {
308 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
309 0
310};
311#endif
312
313/* Bits in the interrupt status/mask registers. */
314enum intr_status_bits {
315 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
316 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
317 IntrPCIErr=0x0040,
318 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
319 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
320 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
321 IntrRxWakeUp=0x8000,
322 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
323 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
324 IntrTxErrSummary=0x082218,
325};
326
327/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
328enum wol_bits {
329 WOLucast = 0x10,
330 WOLmagic = 0x20,
331 WOLbmcast = 0x30,
332 WOLlnkon = 0x40,
333 WOLlnkoff = 0x80,
334};
335
336/* The Rx and Tx buffer descriptors. */
337struct rx_desc {
338 s32 rx_status;
339 u32 desc_length; /* Chain flag, Buffer/frame length */
340 u32 addr;
341 u32 next_desc;
342};
343struct tx_desc {
344 s32 tx_status;
345 u32 desc_length; /* Chain flag, Tx Config, Frame length */
346 u32 addr;
347 u32 next_desc;
348};
349
350/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
351#define TXDESC 0x00e08000
352
353enum rx_status_bits {
354 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
355};
356
357/* Bits in *_desc.*_status */
358enum desc_status_bits {
359 DescOwn=0x80000000
360};
361
362/* Bits in ChipCmd. */
363enum chip_cmd_bits {
364 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
365 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
366 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
367 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
368};
369
/* Per-adapter driver state, stored in the net_device private area. */
struct rhine_private {
	/* Descriptor rings (one contiguous DMA allocation, Rx first) */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];	/* per-entry pointer into tx_bufs */
	unsigned char *tx_bufs;			/* one PKT_BUF_SZ buffer per Tx entry */
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;	/* owning PCI device */
	long pioaddr;		/* I/O port base (BAR 0) */
	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	spinlock_t lock;	/* protects Tx queuing against the IRQ handler */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;		/* rq* flags selected by chip revision at probe */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;		/* Wake-on-LAN options (WOL* bits) */

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;	/* mapped register base (PIO or MMIO BAR) */
};
410
411static int mdio_read(struct net_device *dev, int phy_id, int location);
412static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
413static int rhine_open(struct net_device *dev);
414static void rhine_tx_timeout(struct net_device *dev);
415static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100416static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700418static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419static void rhine_error(struct net_device *dev, int intr_status);
420static void rhine_set_rx_mode(struct net_device *dev);
421static struct net_device_stats *rhine_get_stats(struct net_device *dev);
422static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400423static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424static int rhine_close(struct net_device *dev);
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -0700425static void rhine_shutdown (struct pci_dev *pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426
/*
 * Busy-wait for `condition` to become true, for at most 1024 loop
 * iterations.  Used for short hardware handshakes (reset completion,
 * EEPROM autoload).  With debug > 1, logs the call site whenever more
 * than half of the iteration budget was consumed.
 */
#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
			DRV_NAME, 1024-i, __func__, __LINE__);		\
} while(0)
435
436static inline u32 get_intr_status(struct net_device *dev)
437{
438 struct rhine_private *rp = netdev_priv(dev);
439 void __iomem *ioaddr = rp->base;
440 u32 intr_status;
441
442 intr_status = ioread16(ioaddr + IntrStatus);
443 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
444 if (rp->quirks & rqStatusWBRace)
445 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
446 return intr_status;
447}
448
449/*
450 * Get power related registers into sane state.
451 * Notify user about past WOL event.
452 */
453static void rhine_power_init(struct net_device *dev)
454{
455 struct rhine_private *rp = netdev_priv(dev);
456 void __iomem *ioaddr = rp->base;
457 u16 wolstat;
458
459 if (rp->quirks & rqWOL) {
460 /* Make sure chip is in power state D0 */
461 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
462
463 /* Disable "force PME-enable" */
464 iowrite8(0x80, ioaddr + WOLcgClr);
465
466 /* Clear power-event config bits (WOL) */
467 iowrite8(0xFF, ioaddr + WOLcrClr);
468 /* More recent cards can manage two additional patterns */
469 if (rp->quirks & rq6patterns)
470 iowrite8(0x03, ioaddr + WOLcrClr1);
471
472 /* Save power-event status bits */
473 wolstat = ioread8(ioaddr + PwrcsrSet);
474 if (rp->quirks & rq6patterns)
475 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
476
477 /* Clear power-event status bits */
478 iowrite8(0xFF, ioaddr + PwrcsrClr);
479 if (rp->quirks & rq6patterns)
480 iowrite8(0x03, ioaddr + PwrcsrClr1);
481
482 if (wolstat) {
483 char *reason;
484 switch (wolstat) {
485 case WOLmagic:
486 reason = "Magic packet";
487 break;
488 case WOLlnkon:
489 reason = "Link went up";
490 break;
491 case WOLlnkoff:
492 reason = "Link went down";
493 break;
494 case WOLucast:
495 reason = "Unicast packet";
496 break;
497 case WOLbmcast:
498 reason = "Multicast/broadcast packet";
499 break;
500 default:
501 reason = "Unknown";
502 }
503 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
504 DRV_NAME, reason);
505 }
506 }
507}
508
/*
 * Soft-reset the chip via the Cmd1Reset bit.  If the reset bit has not
 * self-cleared on the first readback, optionally issue the stronger
 * "force reset" (rqForceReset quirk) and busy-wait for completion.
 * With debug > 1, the final outcome is logged.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;		/* flush the posted PCI write before reading back */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
			(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			"failed" : "succeeded");
}
534
535#ifdef USE_MMIO
536static void enable_mmio(long pioaddr, u32 quirks)
537{
538 int n;
539 if (quirks & rqRhineI) {
540 /* More recent docs say that this bit is reserved ... */
541 n = inb(pioaddr + ConfigA) | 0x20;
542 outb(n, pioaddr + ConfigA);
543 } else {
544 n = inb(pioaddr + ConfigD) | 0x80;
545 outb(n, pioaddr + ConfigD);
546 }
547}
548#endif
549
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II).
 *
 * Uses port I/O throughout because the reload clobbers the MMIO enable
 * bits; MMIO is re-enabled afterwards when configured.
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Trigger the EEPROM autoload and wait for the bit to self-clear. */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
576
577#ifdef CONFIG_NET_POLL_CONTROLLER
578static void rhine_poll(struct net_device *dev)
579{
580 disable_irq(dev->irq);
David Howells7d12e782006-10-05 14:55:46 +0100581 rhine_interrupt(dev->irq, (void *)dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582 enable_irq(dev->irq);
583}
584#endif
585
Roger Luethi633949a2006-08-14 23:00:17 -0700586#ifdef CONFIG_VIA_RHINE_NAPI
/*
 * NAPI poll handler: receive up to `budget` packets via rhine_rx().
 * If the Rx work ran out before the budget was spent, leave polling
 * mode and re-enable the chip's interrupt sources.  Returns the
 * number of packets processed.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

		/* Unmask chip interrupts now that polling is complete. */
		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
607#endif
608
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609static void rhine_hw_init(struct net_device *dev, long pioaddr)
610{
611 struct rhine_private *rp = netdev_priv(dev);
612
613 /* Reset the chip to erase previous misconfiguration. */
614 rhine_chip_reset(dev);
615
616 /* Rhine-I needs extra time to recuperate before EEPROM reload */
617 if (rp->quirks & rqRhineI)
618 msleep(5);
619
620 /* Reload EEPROM controlled bytes cleared by soft reset */
621 rhine_reload_eeprom(pioaddr, dev);
622}
623
/*
 * PCI probe routine.  Identifies the Rhine variant from the PCI
 * revision, maps its registers (PIO or MMIO depending on build
 * config), resets the chip, reads the MAC address from the station
 * address registers, wires up the net_device operations and registers
 * the interface.  Returns 0 on success or a negative errno, undoing
 * all partial setup on the error paths at the bottom.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;	/* BAR 1 is the memory-mapped register window */
#else
	int bar = 0;	/* BAR 0 is the I/O port window */
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* Map the PCI revision to chip generation, quirks and PHY id
	   (see enum rhine_revs for the revision breakpoints). */
	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check: both register windows must be large enough */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* Read the EEPROM-loaded MAC address out of the chip. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	/* i == 5 here: prints the sixth and final address byte. */
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* Probe the MII PHY and announce it; un-isolate it via BMCR. */
	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
850
851static int alloc_ring(struct net_device* dev)
852{
853 struct rhine_private *rp = netdev_priv(dev);
854 void *ring;
855 dma_addr_t ring_dma;
856
857 ring = pci_alloc_consistent(rp->pdev,
858 RX_RING_SIZE * sizeof(struct rx_desc) +
859 TX_RING_SIZE * sizeof(struct tx_desc),
860 &ring_dma);
861 if (!ring) {
862 printk(KERN_ERR "Could not allocate DMA memory.\n");
863 return -ENOMEM;
864 }
865 if (rp->quirks & rqRhineI) {
866 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
867 PKT_BUF_SZ * TX_RING_SIZE,
868 &rp->tx_bufs_dma);
869 if (rp->tx_bufs == NULL) {
870 pci_free_consistent(rp->pdev,
871 RX_RING_SIZE * sizeof(struct rx_desc) +
872 TX_RING_SIZE * sizeof(struct tx_desc),
873 ring, ring_dma);
874 return -ENOMEM;
875 }
876 }
877
878 rp->rx_ring = ring;
879 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
880 rp->rx_ring_dma = ring_dma;
881 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
882
883 return 0;
884}
885
886static void free_ring(struct net_device* dev)
887{
888 struct rhine_private *rp = netdev_priv(dev);
889
890 pci_free_consistent(rp->pdev,
891 RX_RING_SIZE * sizeof(struct rx_desc) +
892 TX_RING_SIZE * sizeof(struct tx_desc),
893 rp->rx_ring, rp->rx_ring_dma);
894 rp->tx_ring = NULL;
895
896 if (rp->tx_bufs)
897 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
898 rp->tx_bufs, rp->tx_bufs_dma);
899
900 rp->tx_bufs = NULL;
901
902}
903
/*
 * Initialize the Rx descriptor ring and populate it with freshly
 * allocated, DMA-mapped socket buffers.  Descriptors that receive a
 * buffer are handed to the chip by setting DescOwn.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	/* Standard MTU fits in PKT_BUF_SZ; larger MTUs get MTU + 32
	   bytes of slack for header/CRC. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries: each descriptor links to the next
	   by bus address. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		/* Give this descriptor to the NIC. */
		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* Record how many slots went unfilled (negative offset trick:
	   rhine_rx()'s refill loop uses cur_rx - dirty_rx > 0). */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
944
945static void free_rbufs(struct net_device* dev)
946{
947 struct rhine_private *rp = netdev_priv(dev);
948 int i;
949
950 /* Free all the skbuffs in the Rx queue. */
951 for (i = 0; i < RX_RING_SIZE; i++) {
952 rp->rx_ring[i].rx_status = 0;
953 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
954 if (rp->rx_skbuff[i]) {
955 pci_unmap_single(rp->pdev,
956 rp->rx_skbuff_dma[i],
957 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
958 dev_kfree_skb(rp->rx_skbuff[i]);
959 }
960 rp->rx_skbuff[i] = NULL;
961 }
962}
963
/*
 * Initialize the Tx descriptor ring.  No buffers are attached yet;
 * on Rhine-I each slot is pre-wired to its bounce buffer inside
 * rp->tx_bufs (allocated in alloc_ring()).
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		/* Chain descriptors by bus address. */
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Close the ring: last descriptor points back to the first. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}
984
/*
 * Tear down the Tx ring: reset each descriptor, poison its buffer
 * address, and unmap/free any skb still queued on it.  Bounce-buffer
 * slots (tx_skbuff_dma[i] == 0 on Rhine-I) have no mapping to undo.
 */
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			/* dma handle of 0 means the packet went through
			   the Rhine-I alignment buffer, not a mapping. */
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
1007
/*
 * Re-read link/duplex state via the MII library and mirror the
 * negotiated duplex mode into the chip's ChipCmd1 register.
 * @init_media: non-zero on first call (forces carrier update).
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	/* Read-modify-write only the full-duplex bit. */
	if (rp->mii_if.full_duplex)
	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	else
	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}
1025
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MMI library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}
1041
/*
 * Program the chip for operation: station address, FIFO thresholds,
 * ring base addresses, Rx filter, interrupt mask - then start the
 * Tx/Rx engines.  Called from open and from the timeout handler
 * after a reset; the register write order below is deliberate.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* Load the 6-byte MAC address byte by byte. */
	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_enable(&rp->napi);
#endif

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	/* Start both DMA engines; Cmd1NoTxPoll: Tx is kicked by
	   explicit demand (see rhine_start_tx), not by polling. */
	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1078
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	/* Stop any MII activity, point at BMSR, then start one poll
	   (0x80) and wait for it to latch before enabling continuous
	   auto-polling (0x40).  The exact sequence is required by the
	   hardware. */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1090
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		/* Rhine-I quirk: auto-polling doesn't stop cleanly;
		   re-arm a single poll, wait for it, then clear. */
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
1113
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read one 16-bit PHY register.  Link auto-polling must be turned
 * off for the duration of the access and restored afterwards.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
1134
/*
 * Write one 16-bit PHY register.  Mirrors mdio_read(): auto-polling
 * is suspended around the access.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
1151
/*
 * net_device open() method: grab the (shared) IRQ, allocate and fill
 * the descriptor rings, reset and program the chip, and start the
 * transmit queue.  Returns 0 or a negative errno.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		/* Ring allocation failed: release the IRQ again. */
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1186
/*
 * Transmit watchdog: the stack calls this when a Tx appears hung.
 * With the IRQ masked and NAPI stopped, throw away both rings,
 * rebuild them, reset and reprogram the chip, then restart the
 * queue.  Queued packets are lost (counted as tx_errors).
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}
1223
/*
 * hard_start_xmit method: queue one skb on the Tx ring.  On Rhine-I,
 * packets that are misaligned, fragmented, or need checksum offload
 * are copied into a pre-allocated aligned bounce buffer instead of
 * being mapped directly.  Ownership of the descriptor is handed to
 * the chip last, fenced by write barriers.  Always returns 0
 * (the skb is consumed even on drop).
 */
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* Pad runt frames to the minimum Ethernet length. */
	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* dma handle 0 marks "bounce buffer" for free_tbufs()
		   and rhine_tx(). */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	/* Barriers ensure the descriptor body is visible before the
	   chip sees DescOwn. */
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	/* Stop the queue when the ring is full. */
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
1299
1300/* The interrupt handler does all of the Rx thread work and cleans up
1301 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +01001302static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303{
1304 struct net_device *dev = dev_instance;
1305 struct rhine_private *rp = netdev_priv(dev);
1306 void __iomem *ioaddr = rp->base;
1307 u32 intr_status;
1308 int boguscnt = max_interrupt_work;
1309 int handled = 0;
1310
1311 while ((intr_status = get_intr_status(dev))) {
1312 handled = 1;
1313
1314 /* Acknowledge all of the current interrupt sources ASAP. */
1315 if (intr_status & IntrTxDescRace)
1316 iowrite8(0x08, ioaddr + IntrStatus2);
1317 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1318 IOSYNC;
1319
1320 if (debug > 4)
1321 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1322 dev->name, intr_status);
1323
1324 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
Roger Luethi633949a2006-08-14 23:00:17 -07001325 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1326#ifdef CONFIG_VIA_RHINE_NAPI
1327 iowrite16(IntrTxAborted |
1328 IntrTxDone | IntrTxError | IntrTxUnderrun |
1329 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1330 ioaddr + IntrEnable);
1331
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001332 netif_rx_schedule(dev, &rp->napi);
Roger Luethi633949a2006-08-14 23:00:17 -07001333#else
1334 rhine_rx(dev, RX_RING_SIZE);
1335#endif
1336 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1339 if (intr_status & IntrTxErrSummary) {
1340 /* Avoid scavenging before Tx engine turned off */
1341 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1342 if (debug > 2 &&
1343 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1344 printk(KERN_WARNING "%s: "
1345 "rhine_interrupt() Tx engine"
1346 "still on.\n", dev->name);
1347 }
1348 rhine_tx(dev);
1349 }
1350
1351 /* Abnormal error summary/uncommon events handlers. */
1352 if (intr_status & (IntrPCIErr | IntrLinkChange |
1353 IntrStatsMax | IntrTxError | IntrTxAborted |
1354 IntrTxUnderrun | IntrTxDescRace))
1355 rhine_error(dev, intr_status);
1356
1357 if (--boguscnt < 0) {
1358 printk(KERN_WARNING "%s: Too much work at interrupt, "
1359 "status=%#8.8x.\n",
1360 dev->name, intr_status);
1361 break;
1362 }
1363 }
1364
1365 if (debug > 3)
1366 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1367 dev->name, ioread16(ioaddr + IntrStatus));
1368 return IRQ_RETVAL(handled);
1369}
1370
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
/*
 * Reap completed Tx descriptors: account errors/collisions, unmap
 * and free the transmitted skbs, and wake the queue when enough ring
 * slots have been reclaimed.  On a FIFO error the descriptor is
 * handed back to the chip (DescOwn) so the frame is retried.
 */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;	/* chip still owns it - stop here */
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Success: collision count lives in different
			   bits on Rhine-I vs later chips. */
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	/* Restart the queue once there is comfortable headroom. */
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
1433
Roger Luethi633949a2006-08-14 23:00:17 -07001434/* Process up to limit frames from receive ring */
1435static int rhine_rx(struct net_device *dev, int limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436{
1437 struct rhine_private *rp = netdev_priv(dev);
Roger Luethi633949a2006-08-14 23:00:17 -07001438 int count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 int entry = rp->cur_rx % RX_RING_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
1441 if (debug > 4) {
1442 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1443 dev->name, entry,
1444 le32_to_cpu(rp->rx_head_desc->rx_status));
1445 }
1446
1447 /* If EOP is set on the next entry, it's a new packet. Send it up. */
Roger Luethi633949a2006-08-14 23:00:17 -07001448 for (count = 0; count < limit; ++count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 struct rx_desc *desc = rp->rx_head_desc;
1450 u32 desc_status = le32_to_cpu(desc->rx_status);
1451 int data_size = desc_status >> 16;
1452
Roger Luethi633949a2006-08-14 23:00:17 -07001453 if (desc_status & DescOwn)
1454 break;
1455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 if (debug > 4)
Denis Vlasenkoed4030d2005-06-17 08:23:17 +03001457 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 desc_status);
Roger Luethi633949a2006-08-14 23:00:17 -07001459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1461 if ((desc_status & RxWholePkt) != RxWholePkt) {
1462 printk(KERN_WARNING "%s: Oversized Ethernet "
1463 "frame spanned multiple buffers, entry "
1464 "%#x length %d status %8.8x!\n",
1465 dev->name, entry, data_size,
1466 desc_status);
1467 printk(KERN_WARNING "%s: Oversized Ethernet "
1468 "frame %p vs %p.\n", dev->name,
1469 rp->rx_head_desc, &rp->rx_ring[entry]);
1470 rp->stats.rx_length_errors++;
1471 } else if (desc_status & RxErr) {
1472 /* There was a error. */
1473 if (debug > 2)
Denis Vlasenkoed4030d2005-06-17 08:23:17 +03001474 printk(KERN_DEBUG "rhine_rx() Rx "
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 "error was %8.8x.\n",
1476 desc_status);
1477 rp->stats.rx_errors++;
1478 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1479 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1480 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1481 if (desc_status & 0x0002) {
1482 /* this can also be updated outside the interrupt handler */
1483 spin_lock(&rp->lock);
1484 rp->stats.rx_crc_errors++;
1485 spin_unlock(&rp->lock);
1486 }
1487 }
1488 } else {
1489 struct sk_buff *skb;
1490 /* Length should omit the CRC */
1491 int pkt_len = data_size - 4;
1492
1493 /* Check if the packet is long enough to accept without
1494 copying to a minimally-sized skbuff. */
1495 if (pkt_len < rx_copybreak &&
1496 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 skb_reserve(skb, 2); /* 16 byte align the IP header */
1498 pci_dma_sync_single_for_cpu(rp->pdev,
1499 rp->rx_skbuff_dma[entry],
1500 rp->rx_buf_sz,
1501 PCI_DMA_FROMDEVICE);
1502
David S. Miller8c7b7fa2007-07-10 22:08:12 -07001503 skb_copy_to_linear_data(skb,
David S. Miller689be432005-06-28 15:25:31 -07001504 rp->rx_skbuff[entry]->data,
David S. Miller8c7b7fa2007-07-10 22:08:12 -07001505 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 skb_put(skb, pkt_len);
1507 pci_dma_sync_single_for_device(rp->pdev,
1508 rp->rx_skbuff_dma[entry],
1509 rp->rx_buf_sz,
1510 PCI_DMA_FROMDEVICE);
1511 } else {
1512 skb = rp->rx_skbuff[entry];
1513 if (skb == NULL) {
1514 printk(KERN_ERR "%s: Inconsistent Rx "
1515 "descriptor chain.\n",
1516 dev->name);
1517 break;
1518 }
1519 rp->rx_skbuff[entry] = NULL;
1520 skb_put(skb, pkt_len);
1521 pci_unmap_single(rp->pdev,
1522 rp->rx_skbuff_dma[entry],
1523 rp->rx_buf_sz,
1524 PCI_DMA_FROMDEVICE);
1525 }
1526 skb->protocol = eth_type_trans(skb, dev);
Roger Luethi633949a2006-08-14 23:00:17 -07001527#ifdef CONFIG_VIA_RHINE_NAPI
1528 netif_receive_skb(skb);
1529#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 netif_rx(skb);
Roger Luethi633949a2006-08-14 23:00:17 -07001531#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 dev->last_rx = jiffies;
1533 rp->stats.rx_bytes += pkt_len;
1534 rp->stats.rx_packets++;
1535 }
1536 entry = (++rp->cur_rx) % RX_RING_SIZE;
1537 rp->rx_head_desc = &rp->rx_ring[entry];
1538 }
1539
1540 /* Refill the Rx ring buffers. */
1541 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1542 struct sk_buff *skb;
1543 entry = rp->dirty_rx % RX_RING_SIZE;
1544 if (rp->rx_skbuff[entry] == NULL) {
1545 skb = dev_alloc_skb(rp->rx_buf_sz);
1546 rp->rx_skbuff[entry] = skb;
1547 if (skb == NULL)
1548 break; /* Better luck next round. */
1549 skb->dev = dev; /* Mark as being used by this device. */
1550 rp->rx_skbuff_dma[entry] =
David S. Miller689be432005-06-28 15:25:31 -07001551 pci_map_single(rp->pdev, skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 rp->rx_buf_sz,
1553 PCI_DMA_FROMDEVICE);
1554 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1555 }
1556 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1557 }
Roger Luethi633949a2006-08-14 23:00:17 -07001558
1559 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560}
1561
1562/*
1563 * Clears the "tally counters" for CRC errors and missed frames(?).
1564 * It has been reported that some chips need a write of 0 to clear
1565 * these, for others the counters are set to 1 when written to and
1566 * instead cleared when read. So we clear them both ways ...
1567 */
1568static inline void clear_tally_counters(void __iomem *ioaddr)
1569{
1570 iowrite32(0, ioaddr + RxMissed);
1571 ioread16(ioaddr + RxCRCErrs);
1572 ioread16(ioaddr + RxMissed);
1573}
1574
/*
 * Restart the Tx engine after it stopped on an error.  Repoints the
 * chip at the next unreaped descriptor and re-enables transmission -
 * unless yet another Tx error is already pending, in which case the
 * ISR will come back around and sort it out first.
 */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
		       ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
		       ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		       ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occured %8.8x.\n",
			       dev->name, intr_status);
	}

}
1608
/*
 * Handle "abnormal" interrupt sources: link changes, statistics
 * counter overflow, and the various Tx failure modes.  Tx underruns
 * and unspecified Tx errors bump the Tx FIFO threshold (up to 0xE0)
 * before the engine is restarted.
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Fold the hardware tally counters into our stats. */
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Raise the Tx FIFO threshold so more of the frame is
		   buffered before transmission starts. */
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Anything left over is unexpected - log it. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}
1666
/*
 * get_stats method: fold the chip's tally counters into the software
 * statistics (under the lock, since the ISR does the same) and
 * return the accumulated stats.
 */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}
1681
/*
 * Program the Rx filter: promiscuous, all-multicast, or a 64-bit
 * CRC-based multicast hash built from the device's multicast list.
 * Also rewrites RxConfig with the current Rx FIFO threshold.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 CRC bits select one of 64 hash bits. */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
1715
1716static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1717{
1718 struct rhine_private *rp = netdev_priv(dev);
1719
1720 strcpy(info->driver, DRV_NAME);
1721 strcpy(info->version, DRV_VERSION);
1722 strcpy(info->bus_info, pci_name(rp->pdev));
1723}
1724
1725static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1726{
1727 struct rhine_private *rp = netdev_priv(dev);
1728 int rc;
1729
1730 spin_lock_irq(&rp->lock);
1731 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1732 spin_unlock_irq(&rp->lock);
1733
1734 return rc;
1735}
1736
1737static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1738{
1739 struct rhine_private *rp = netdev_priv(dev);
1740 int rc;
1741
1742 spin_lock_irq(&rp->lock);
1743 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1744 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02001745 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747 return rc;
1748}
1749
1750static int netdev_nway_reset(struct net_device *dev)
1751{
1752 struct rhine_private *rp = netdev_priv(dev);
1753
1754 return mii_nway_restart(&rp->mii_if);
1755}
1756
1757static u32 netdev_get_link(struct net_device *dev)
1758{
1759 struct rhine_private *rp = netdev_priv(dev);
1760
1761 return mii_link_ok(&rp->mii_if);
1762}
1763
/* ethtool get_msglevel method: expose the module-wide debug level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1768
/* ethtool set_msglevel method: set the module-wide debug level
   (shared by all rhine devices, since 'debug' is a module global). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1773
/*
 * ethtool get_wol method: report supported and currently enabled
 * Wake-on-LAN modes.  Silently a no-op on chips without WOL
 * capability (no rqWOL quirk).
 */
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}
1787
/*
 * ethtool set_wol method: record the requested Wake-on-LAN modes
 * (applied to the hardware at shutdown time).  Rejects chips without
 * WOL support and any mode outside the supported set.
 */
static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}
1806
Jeff Garzik7282d492006-09-13 14:30:00 -04001807static const struct ethtool_ops netdev_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 .get_drvinfo = netdev_get_drvinfo,
1809 .get_settings = netdev_get_settings,
1810 .set_settings = netdev_set_settings,
1811 .nway_reset = netdev_nway_reset,
1812 .get_link = netdev_get_link,
1813 .get_msglevel = netdev_get_msglevel,
1814 .set_msglevel = netdev_set_msglevel,
1815 .get_wol = rhine_get_wol,
1816 .set_wol = rhine_set_wol,
1817 .get_sg = ethtool_op_get_sg,
1818 .get_tx_csum = ethtool_op_get_tx_csum,
1819};
1820
/*
 * do_ioctl method: delegate SIOCGMIIxxx/SIOCSMIIxxx requests to the
 * generic MII ioctl helper under the lock, then refresh carrier state
 * (a write may have toggled forced-media mode).  Requires the
 * interface to be up.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}
1836
1837static int rhine_close(struct net_device *dev)
1838{
1839 struct rhine_private *rp = netdev_priv(dev);
1840 void __iomem *ioaddr = rp->base;
1841
1842 spin_lock_irq(&rp->lock);
1843
1844 netif_stop_queue(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001845#ifdef CONFIG_VIA_RHINE_NAPI
1846 napi_disable(&rp->napi);
1847#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
1849 if (debug > 1)
1850 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1851 "status was %4.4x.\n",
1852 dev->name, ioread16(ioaddr + ChipCmd));
1853
1854 /* Switch to loopback mode to avoid hardware races. */
1855 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1856
1857 /* Disable interrupts by clearing the interrupt mask. */
1858 iowrite16(0x0000, ioaddr + IntrEnable);
1859
1860 /* Stop the chip's Tx and Rx processes. */
1861 iowrite16(CmdStop, ioaddr + ChipCmd);
1862
1863 spin_unlock_irq(&rp->lock);
1864
1865 free_irq(rp->pdev->irq, dev);
1866 free_rbufs(dev);
1867 free_tbufs(dev);
1868 free_ring(dev);
1869
1870 return 0;
1871}
1872
1873
/*
 * PCI remove callback. Unregisters the net_device first so no new I/O can
 * be started, then releases the MMIO mapping and PCI regions, and finally
 * frees the device structure and disables the PCI function.
 */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1888
/*
 * PCI shutdown callback: program the chip's Wake-on-LAN registers
 * according to rp->wolopts and, unless avoid_D3 is set, drop the chip
 * into power state D3. Also called from rhine_suspend() with rp->lock
 * held, so no locking is done here.
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	/* Broadcast and multicast wake-up share one WOL config bit. */
	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}
1935
1936#ifdef CONFIG_PM
/*
 * PCI suspend callback. Quiesces NAPI, detaches the netdev, saves PCI
 * config space, arms Wake-on-LAN / powers the chip down via
 * rhine_shutdown() (under rp->lock), then releases the IRQ.
 * A no-op if the interface is not running.
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif
	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
1959
/*
 * PCI resume callback: reacquire the IRQ, return the chip to D0, restore
 * PCI config space, rebuild the Rx/Tx rings from scratch, reprogram the
 * chip registers and reattach the netdev.
 * A no-op if the interface is not running.
 */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/*
	 * NOTE(review): the IRQ is requested before the device is brought
	 * back to D0 and its config space restored -- looks backwards for a
	 * shared interrupt line; confirm against current upstream ordering.
	 */
	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	/* Re-enable memory-mapped I/O; it is lost across the power cycle. */
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Discard any stale descriptors/buffers and allocate fresh rings. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
1996#endif /* CONFIG_PM */
1997
/* PCI driver glue: probe/remove, shutdown, and (optional) PM callbacks. */
static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};
2009
/*
 * DMI blacklist of boards whose BIOS fails at PXE boot when the chip is
 * left in D3; matched in rhine_init() to force avoid_D3.
 */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028static int __init rhine_init(void)
2029{
2030/* when a module, this is printed whether or not devices are found in probe */
2031#ifdef MODULE
2032 printk(version);
2033#endif
Roger Luethie84df482007-03-06 19:57:37 +01002034 if (dmi_check_system(rhine_dmi_table)) {
2035 /* these BIOSes fail at PXE boot if chip is in D3 */
2036 avoid_D3 = 1;
2037 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2038 "enabled.\n",
2039 DRV_NAME);
2040 }
2041 else if (avoid_D3)
2042 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2043
Jeff Garzik29917622006-08-19 17:48:59 -04002044 return pci_register_driver(&rhine_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
2047
/* Module exit point: unregister the PCI driver, tearing down all devices. */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2052
2053
/* Wire up module load/unload entry points. */
module_init(rhine_init);
module_exit(rhine_cleanup);