blob: 716fd423670d18182cdbfc1438a5fc2b65cec7a4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi38f49e82010-12-06 00:59:40 +000035#define DRV_VERSION "1.5.0"
36#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
42
Joe Perchesdf4511f2011-04-16 14:15:25 +000043#define DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -070044static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
45static int max_interrupt_work = 20;
46
47/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
48 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000049#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
50 defined(CONFIG_SPARC) || defined(__ia64__) || \
51 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070052static int rx_copybreak = 1518;
53#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070054static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070055#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Roger Luethib933b4d2006-08-14 23:00:21 -070057/* Work-around for broken BIOSes: they are unable to get the chip back out of
58 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000059static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/*
62 * In case you are looking for 'options[]' or 'full_duplex[]', they
63 * are gone. Use ethtool(8) instead.
64 */
65
66/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
67 The Rhine has a 64 element 8390-like hash table. */
68static const int multicast_filter_limit = 32;
69
70
71/* Operational parameters that are set at compile time. */
72
73/* Keep the ring sizes a power of two for compile efficiency.
74 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
75 Making the Tx ring too large decreases the effectiveness of channel
76 bonding and packet priority.
77 There are no ill effects from too-large receive rings. */
78#define TX_RING_SIZE 16
79#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070080#define RX_RING_SIZE 64
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
82/* Operational parameters that usually are not changed. */
83
84/* Time in jiffies before concluding the transmitter is hung. */
85#define TX_TIMEOUT (2*HZ)
86
87#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
88
89#include <linux/module.h>
90#include <linux/moduleparam.h>
91#include <linux/kernel.h>
92#include <linux/string.h>
93#include <linux/timer.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <linux/interrupt.h>
97#include <linux/pci.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -040098#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070099#include <linux/netdevice.h>
100#include <linux/etherdevice.h>
101#include <linux/skbuff.h>
102#include <linux/init.h>
103#include <linux/delay.h>
104#include <linux/mii.h>
105#include <linux/ethtool.h>
106#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000107#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800109#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110#include <asm/processor.h> /* Processor type for cache alignment. */
111#include <asm/io.h>
112#include <asm/irq.h>
113#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100114#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
116/* These identify the driver base version and may not be removed. */
Stephen Hemmingerc8de1fc2009-02-26 10:19:31 +0000117static const char version[] __devinitconst =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000118 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
120/* This driver was written to use PCI memory space. Some early versions
121 of the Rhine may only work correctly with I/O space accesses. */
122#ifdef CONFIG_VIA_RHINE_MMIO
123#define USE_MMIO
124#else
125#endif
126
127MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129MODULE_LICENSE("GPL");
130
131module_param(max_interrupt_work, int, 0);
132module_param(debug, int, 0);
133module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700134module_param(avoid_D3, bool, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700138MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
Roger Luethi38f49e82010-12-06 00:59:40 +0000140#define MCAM_SIZE 32
141#define VCAM_SIZE 32
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143/*
144 Theory of Operation
145
146I. Board Compatibility
147
148This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
149controller.
150
151II. Board-specific settings
152
153Boards with this chip are functional only in a bus-master PCI slot.
154
155Many operational settings are loaded from the EEPROM to the Config word at
156offset 0x78. For most of these settings, this driver assumes that they are
157correct.
158If this driver is compiled to use PCI memory space operations the EEPROM
159must be configured to enable memory ops.
160
161III. Driver operation
162
163IIIa. Ring buffers
164
165This driver uses two statically allocated fixed-size descriptor lists
166formed into rings by a branch from the final descriptor to the beginning of
167the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
168
169IIIb/c. Transmit/Receive Structure
170
171This driver attempts to use a zero-copy receive and transmit scheme.
172
173Alas, all data buffers are required to start on a 32 bit boundary, so
174the driver must often copy transmit packets into bounce buffers.
175
176The driver allocates full frame size skbuffs for the Rx ring buffers at
177open() time and passes the skb->data field to the chip as receive data
178buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
179a fresh skbuff is allocated and the frame is copied to the new skbuff.
180When the incoming frame is larger, the skbuff is passed directly up the
181protocol stack. Buffers consumed this way are replaced by newly allocated
182skbuffs in the last phase of rhine_rx().
183
184The RX_COPYBREAK value is chosen to trade-off the memory wasted by
185using a full-sized skbuff for small frames vs. the copying costs of larger
186frames. New boards are typically used in generously configured machines
187and the underfilled buffers have negligible impact compared to the benefit of
188a single allocation size, so the default value of zero results in never
189copying packets. When copying is done, the cost is usually mitigated by using
190a combined copy/checksum routine. Copying also preloads the cache, which is
191most useful with small frames.
192
193Since the VIA chips are only able to transfer data to buffers on 32 bit
194boundaries, the IP header at offset 14 in an ethernet frame isn't
195longword aligned for further processing. Copying these unaligned buffers
196has the beneficial effect of 16-byte aligning the IP header.
197
198IIId. Synchronization
199
200The driver runs as two independent, single-threaded flows of control. One
201is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800202netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
203which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
205The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800206netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
207the ring is not available it stops the transmit queue by
208calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210The interrupt handler has exclusive control over the Rx ring and records stats
211from the Tx ring. After reaping the stats, it marks the Tx queue entry as
212empty by incrementing the dirty_tx mark. If at least half of the entries in
213the Rx ring are available the transmit queue is woken up if it was stopped.
214
215IV. Notes
216
217IVb. References
218
219Preliminary VT86C100A manual from http://www.via.com.tw/
220http://www.scyld.com/expert/100mbps.html
221http://www.scyld.com/expert/NWay.html
222ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
223ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
224
225
226IVc. Errata
227
228The VT86C100A manual is not reliable information.
229The 3043 chip does not handle unaligned transmit or receive buffers, resulting
230in significant performance degradation for bounce buffer copies on transmit
231and unaligned IP headers on receive.
232The chip does not pad to minimum transmit length.
233
234*/
235
236
237/* This table drives the PCI probe routines. It's mostly boilerplate in all
238 of the drivers, and will likely be provided by some future kernel.
 Note the matching code -- the first table entry matches all 56** cards but
240 second only the 1234 card.
241*/
242
243enum rhine_revs {
244 VT86C100A = 0x00,
245 VTunknown0 = 0x20,
246 VT6102 = 0x40,
247 VT8231 = 0x50, /* Integrated MAC */
248 VT8233 = 0x60, /* Integrated MAC */
249 VT8235 = 0x74, /* Integrated MAC */
250 VT8237 = 0x78, /* Integrated MAC */
251 VTunknown1 = 0x7C,
252 VT6105 = 0x80,
253 VT6105_B0 = 0x83,
254 VT6105L = 0x8A,
255 VT6107 = 0x8C,
256 VTunknown2 = 0x8E,
257 VT6105M = 0x90, /* Management adapter */
258};
259
260enum rhine_quirks {
261 rqWOL = 0x0001, /* Wake-On-LAN support */
262 rqForceReset = 0x0002,
263 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
264 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
265 rqRhineI = 0x0100, /* See comment below */
266};
267/*
268 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
269 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
271 */
272
273/* Beware of PCI posted writes */
274#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
275
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000276static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400277 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
278 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
279 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
280 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281 { } /* terminate list */
282};
283MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284
285
286/* Offsets to the device registers. */
287enum register_offsets {
288 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
Roger Luethi38f49e82010-12-06 00:59:40 +0000289 ChipCmd1=0x09, TQWake=0x0A,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 IntrStatus=0x0C, IntrEnable=0x0E,
291 MulticastFilter0=0x10, MulticastFilter1=0x14,
292 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
Roger Luethi38f49e82010-12-06 00:59:40 +0000293 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
295 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
296 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
297 StickyHW=0x83, IntrStatus2=0x84,
Roger Luethi38f49e82010-12-06 00:59:40 +0000298 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
300 WOLcrClr1=0xA6, WOLcgClr=0xA7,
301 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
302};
303
304/* Bits in ConfigD */
305enum backoff_bits {
306 BackOptional=0x01, BackModify=0x02,
307 BackCaptureEffect=0x04, BackRandom=0x08
308};
309
Roger Luethi38f49e82010-12-06 00:59:40 +0000310/* Bits in the TxConfig (TCR) register */
311enum tcr_bits {
312 TCR_PQEN=0x01,
313 TCR_LB0=0x02, /* loopback[0] */
314 TCR_LB1=0x04, /* loopback[1] */
315 TCR_OFSET=0x08,
316 TCR_RTGOPT=0x10,
317 TCR_RTFT0=0x20,
318 TCR_RTFT1=0x40,
319 TCR_RTSF=0x80,
320};
321
322/* Bits in the CamCon (CAMC) register */
323enum camcon_bits {
324 CAMC_CAMEN=0x01,
325 CAMC_VCAMSL=0x02,
326 CAMC_CAMWR=0x04,
327 CAMC_CAMRD=0x08,
328};
329
330/* Bits in the PCIBusConfig1 (BCR1) register */
331enum bcr1_bits {
332 BCR1_POT0=0x01,
333 BCR1_POT1=0x02,
334 BCR1_POT2=0x04,
335 BCR1_CTFT0=0x08,
336 BCR1_CTFT1=0x10,
337 BCR1_CTSF=0x20,
338 BCR1_TXQNOBK=0x40, /* for VT6105 */
339 BCR1_VIDFR=0x80, /* for VT6105 */
340 BCR1_MED0=0x40, /* for VT6102 */
341 BCR1_MED1=0x80, /* for VT6102 */
342};
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344#ifdef USE_MMIO
345/* Registers we check that mmio and reg are the same. */
346static const int mmio_verify_registers[] = {
347 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
348 0
349};
350#endif
351
352/* Bits in the interrupt status/mask registers. */
353enum intr_status_bits {
354 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
355 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
356 IntrPCIErr=0x0040,
357 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
358 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
359 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
360 IntrRxWakeUp=0x8000,
361 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
362 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
363 IntrTxErrSummary=0x082218,
364};
365
366/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
367enum wol_bits {
368 WOLucast = 0x10,
369 WOLmagic = 0x20,
370 WOLbmcast = 0x30,
371 WOLlnkon = 0x40,
372 WOLlnkoff = 0x80,
373};
374
375/* The Rx and Tx buffer descriptors. */
376struct rx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400377 __le32 rx_status;
378 __le32 desc_length; /* Chain flag, Buffer/frame length */
379 __le32 addr;
380 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381};
382struct tx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400383 __le32 tx_status;
384 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
385 __le32 addr;
386 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387};
388
389/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
390#define TXDESC 0x00e08000
391
392enum rx_status_bits {
393 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
394};
395
396/* Bits in *_desc.*_status */
397enum desc_status_bits {
398 DescOwn=0x80000000
399};
400
Roger Luethi38f49e82010-12-06 00:59:40 +0000401/* Bits in *_desc.*_length */
402enum desc_length_bits {
403 DescTag=0x00010000
404};
405
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406/* Bits in ChipCmd. */
407enum chip_cmd_bits {
408 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
409 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
410 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
411 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
412};
413
414struct rhine_private {
Roger Luethi38f49e82010-12-06 00:59:40 +0000415 /* Bit mask for configured VLAN ids */
416 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
417
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 /* Descriptor rings */
419 struct rx_desc *rx_ring;
420 struct tx_desc *tx_ring;
421 dma_addr_t rx_ring_dma;
422 dma_addr_t tx_ring_dma;
423
424 /* The addresses of receive-in-place skbuffs. */
425 struct sk_buff *rx_skbuff[RX_RING_SIZE];
426 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
427
428 /* The saved address of a sent-in-place packet/buffer, for later free(). */
429 struct sk_buff *tx_skbuff[TX_RING_SIZE];
430 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
431
Roger Luethi4be5de22006-04-04 20:49:16 +0200432 /* Tx bounce buffers (Rhine-I only) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433 unsigned char *tx_buf[TX_RING_SIZE];
434 unsigned char *tx_bufs;
435 dma_addr_t tx_bufs_dma;
436
437 struct pci_dev *pdev;
438 long pioaddr;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700439 struct net_device *dev;
440 struct napi_struct napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441 spinlock_t lock;
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800442 struct work_struct reset_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443
444 /* Frequently used values: keep some adjacent for cache effect. */
445 u32 quirks;
446 struct rx_desc *rx_head_desc;
447 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
448 unsigned int cur_tx, dirty_tx;
449 unsigned int rx_buf_sz; /* Based on MTU+slack. */
450 u8 wolopts;
451
452 u8 tx_thresh, rx_thresh;
453
454 struct mii_if_info mii_if;
455 void __iomem *base;
456};
457
Roger Luethi38f49e82010-12-06 00:59:40 +0000458#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
459#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
460#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
461
462#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
463#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
464#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
465
466#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
467#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
468#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
469
470#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
471#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
472#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
473
474
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475static int mdio_read(struct net_device *dev, int phy_id, int location);
476static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
477static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800478static void rhine_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000480static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
481 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100482static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700484static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485static void rhine_error(struct net_device *dev, int intr_status);
486static void rhine_set_rx_mode(struct net_device *dev);
487static struct net_device_stats *rhine_get_stats(struct net_device *dev);
488static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400489static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490static int rhine_close(struct net_device *dev);
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -0700491static void rhine_shutdown (struct pci_dev *pdev);
Jiri Pirko8e586132011-12-08 19:52:37 -0500492static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
493static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
Roger Luethi38f49e82010-12-06 00:59:40 +0000494static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
495static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
496static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
497static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
498static void rhine_init_cam_filter(struct net_device *dev);
499static void rhine_update_vcam(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500
/*
 * Busy-wait until @condition becomes true, polling at most 1024 times.
 * There is no error path on timeout: the loop simply gives up and the
 * caller must tolerate the condition still being false.  When more than
 * half of the poll budget was consumed and debug > 1, the number of
 * cycles used and the call site are logged.
 */
#define RHINE_WAIT_FOR(condition)				\
do {								\
	int i = 1024;						\
	while (!(condition) && --i)				\
		;						\
	if (debug > 1 && i < 512)				\
		pr_info("%4d cycles used @ %s:%d\n",		\
			1024 - i, __func__, __LINE__);		\
} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510
Francois Romieua20a28b2011-12-30 14:53:58 +0100511static u32 rhine_get_events(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 void __iomem *ioaddr = rp->base;
514 u32 intr_status;
515
516 intr_status = ioread16(ioaddr + IntrStatus);
517 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
518 if (rp->quirks & rqStatusWBRace)
519 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
520 return intr_status;
521}
522
/*
 * Acknowledge (clear) the interrupt events given in @mask.
 *
 * Bits 16-23 of @mask map to IntrStatus2 (Rhine-II write-back race
 * quirk) and are written back first; the low 16 bits go to IntrStatus.
 * The final IOSYNC read flushes these posted PCI writes.
 */
static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
	IOSYNC;	/* beware of PCI posted writes */
}
532
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533/*
534 * Get power related registers into sane state.
535 * Notify user about past WOL event.
536 */
537static void rhine_power_init(struct net_device *dev)
538{
539 struct rhine_private *rp = netdev_priv(dev);
540 void __iomem *ioaddr = rp->base;
541 u16 wolstat;
542
543 if (rp->quirks & rqWOL) {
544 /* Make sure chip is in power state D0 */
545 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
546
547 /* Disable "force PME-enable" */
548 iowrite8(0x80, ioaddr + WOLcgClr);
549
550 /* Clear power-event config bits (WOL) */
551 iowrite8(0xFF, ioaddr + WOLcrClr);
552 /* More recent cards can manage two additional patterns */
553 if (rp->quirks & rq6patterns)
554 iowrite8(0x03, ioaddr + WOLcrClr1);
555
556 /* Save power-event status bits */
557 wolstat = ioread8(ioaddr + PwrcsrSet);
558 if (rp->quirks & rq6patterns)
559 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
560
561 /* Clear power-event status bits */
562 iowrite8(0xFF, ioaddr + PwrcsrClr);
563 if (rp->quirks & rq6patterns)
564 iowrite8(0x03, ioaddr + PwrcsrClr1);
565
566 if (wolstat) {
567 char *reason;
568 switch (wolstat) {
569 case WOLmagic:
570 reason = "Magic packet";
571 break;
572 case WOLlnkon:
573 reason = "Link went up";
574 break;
575 case WOLlnkoff:
576 reason = "Link went down";
577 break;
578 case WOLucast:
579 reason = "Unicast packet";
580 break;
581 case WOLbmcast:
582 reason = "Multicast/broadcast packet";
583 break;
584 default:
585 reason = "Unknown";
586 }
Joe Perchesdf4511f2011-04-16 14:15:25 +0000587 netdev_info(dev, "Woke system up. Reason: %s\n",
588 reason);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 }
590 }
591}
592
/*
 * Soft-reset the chip via the Cmd1Reset bit of ChipCmd1.
 *
 * If the reset bit has not self-cleared after the posted write is
 * flushed, a forced reset via MiscCmd is attempted (rqForceReset
 * chips only) and we busy-wait for completion.  The final outcome is
 * logged when debug > 1; note that a "failed" reset is only reported,
 * not treated as an error here.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;	/* flush posted write before re-reading ChipCmd1 */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		netdev_info(dev, "Reset %s\n",
			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			    "failed" : "succeeded");
}
617
#ifdef USE_MMIO
/*
 * Enable memory-mapped I/O access to the chip registers, using port
 * I/O (the chip always answers on pioaddr).  Rhine-I uses a different
 * enable bit (ConfigA bit 5) than later chips (ConfigD bit 7).
 */
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif
632
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 *
 * Triggers an EEPROM auto-load via MACRegEEcsr (port I/O, since the
 * reload clobbers the MMIO-enable bits) and busy-waits for the chip to
 * clear the trigger bit again.
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
659
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netconsole et al.: run the interrupt handler
 * by hand with the device IRQ masked, so it cannot race the real one.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif
668
Francois Romieu269f3112011-12-30 14:43:54 +0100669static void rhine_kick_tx_threshold(struct rhine_private *rp)
670{
671 if (rp->tx_thresh < 0xe0) {
672 void __iomem *ioaddr = rp->base;
673
674 rp->tx_thresh += 0x20;
675 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
676 }
677}
678
/*
 * NAPI poll callback: process up to @budget received packets.
 *
 * When less than the full budget was used, polling is complete:
 * napi_complete() is called and the device interrupts are re-enabled
 * via IntrEnable.  Returning work_done == budget keeps us on the poll
 * list instead.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		/* Re-enable the full interrupt mask */
		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
Roger Luethi633949a2006-08-14 23:00:17 -0700699
/*
 * One-time hardware bring-up at probe: soft-reset the chip and reload
 * the EEPROM-controlled configuration bytes that the reset cleared.
 */
static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
714
/* net_device callback table; poll_controller only with netpoll support. */
static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats		 = rhine_get_stats,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
732
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733static int __devinit rhine_init_one(struct pci_dev *pdev,
734 const struct pci_device_id *ent)
735{
736 struct net_device *dev;
737 struct rhine_private *rp;
738 int i, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 u32 quirks;
740 long pioaddr;
741 long memaddr;
742 void __iomem *ioaddr;
743 int io_size, phy_id;
744 const char *name;
745#ifdef USE_MMIO
746 int bar = 1;
747#else
748 int bar = 0;
749#endif
750
751/* when built into the kernel, we only print version if device is found */
752#ifndef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +0000753 pr_info_once("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754#endif
755
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 io_size = 256;
757 phy_id = 0;
758 quirks = 0;
759 name = "Rhine";
Auke Kok44c10132007-06-08 15:46:36 -0700760 if (pdev->revision < VTunknown0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 quirks = rqRhineI;
762 io_size = 128;
763 }
Auke Kok44c10132007-06-08 15:46:36 -0700764 else if (pdev->revision >= VT6102) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 quirks = rqWOL | rqForceReset;
Auke Kok44c10132007-06-08 15:46:36 -0700766 if (pdev->revision < VT6105) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 name = "Rhine II";
768 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
769 }
770 else {
771 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
Auke Kok44c10132007-06-08 15:46:36 -0700772 if (pdev->revision >= VT6105_B0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 quirks |= rq6patterns;
Auke Kok44c10132007-06-08 15:46:36 -0700774 if (pdev->revision < VT6105M)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 name = "Rhine III";
776 else
777 name = "Rhine III (Management Adapter)";
778 }
779 }
780
781 rc = pci_enable_device(pdev);
782 if (rc)
783 goto err_out;
784
785 /* this should always be supported */
Yang Hongyang284901a2009-04-06 19:01:15 -0700786 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 if (rc) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000788 dev_err(&pdev->dev,
789 "32-bit PCI DMA addresses not supported by the card!?\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 goto err_out;
791 }
792
793 /* sanity check */
794 if ((pci_resource_len(pdev, 0) < io_size) ||
795 (pci_resource_len(pdev, 1) < io_size)) {
796 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000797 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 goto err_out;
799 }
800
801 pioaddr = pci_resource_start(pdev, 0);
802 memaddr = pci_resource_start(pdev, 1);
803
804 pci_set_master(pdev);
805
806 dev = alloc_etherdev(sizeof(struct rhine_private));
807 if (!dev) {
808 rc = -ENOMEM;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000809 dev_err(&pdev->dev, "alloc_etherdev failed\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 goto err_out;
811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 SET_NETDEV_DEV(dev, &pdev->dev);
813
814 rp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700815 rp->dev = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 rp->quirks = quirks;
817 rp->pioaddr = pioaddr;
818 rp->pdev = pdev;
819
820 rc = pci_request_regions(pdev, DRV_NAME);
821 if (rc)
822 goto err_out_free_netdev;
823
824 ioaddr = pci_iomap(pdev, bar, io_size);
825 if (!ioaddr) {
826 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000827 dev_err(&pdev->dev,
828 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
829 pci_name(pdev), io_size, memaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 goto err_out_free_res;
831 }
832
833#ifdef USE_MMIO
834 enable_mmio(pioaddr, quirks);
835
836 /* Check that selected MMIO registers match the PIO ones */
837 i = 0;
838 while (mmio_verify_registers[i]) {
839 int reg = mmio_verify_registers[i++];
840 unsigned char a = inb(pioaddr+reg);
841 unsigned char b = readb(ioaddr+reg);
842 if (a != b) {
843 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000844 dev_err(&pdev->dev,
845 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
846 reg, a, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 goto err_out_unmap;
848 }
849 }
850#endif /* USE_MMIO */
851
852 dev->base_addr = (unsigned long)ioaddr;
853 rp->base = ioaddr;
854
855 /* Get chip registers into a sane state */
856 rhine_power_init(dev);
857 rhine_hw_init(dev, pioaddr);
858
859 for (i = 0; i < 6; i++)
860 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
861
Joe Perches482e3fe2011-04-16 14:15:26 +0000862 if (!is_valid_ether_addr(dev->dev_addr)) {
863 /* Report it and use a random ethernet address instead */
864 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
865 random_ether_addr(dev->dev_addr);
866 netdev_info(dev, "Using random MAC address: %pM\n",
867 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 }
Joe Perches482e3fe2011-04-16 14:15:26 +0000869 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870
871 /* For Rhine-I/II, phy_id is loaded from EEPROM */
872 if (!phy_id)
873 phy_id = ioread8(ioaddr + 0x6C);
874
875 dev->irq = pdev->irq;
876
877 spin_lock_init(&rp->lock);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800878 INIT_WORK(&rp->reset_task, rhine_reset_task);
879
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 rp->mii_if.dev = dev;
881 rp->mii_if.mdio_read = mdio_read;
882 rp->mii_if.mdio_write = mdio_write;
883 rp->mii_if.phy_id_mask = 0x1f;
884 rp->mii_if.reg_num_mask = 0x1f;
885
886 /* The chip-specific entries in the device structure. */
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800887 dev->netdev_ops = &rhine_netdev_ops;
888 dev->ethtool_ops = &netdev_ethtool_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 dev->watchdog_timeo = TX_TIMEOUT;
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800890
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700891 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
Francois Romieu32b0f532008-07-11 00:30:14 +0200892
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 if (rp->quirks & rqRhineI)
894 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
895
Roger Luethi38f49e82010-12-06 00:59:40 +0000896 if (pdev->revision >= VT6105M)
897 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
898 NETIF_F_HW_VLAN_FILTER;
899
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 /* dev->name not defined before register_netdev()! */
901 rc = register_netdev(dev);
902 if (rc)
903 goto err_out_unmap;
904
Joe Perchesdf4511f2011-04-16 14:15:25 +0000905 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
906 name,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907#ifdef USE_MMIO
Joe Perchesdf4511f2011-04-16 14:15:25 +0000908 memaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909#else
Joe Perchesdf4511f2011-04-16 14:15:25 +0000910 (long)ioaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911#endif
Joe Perchesdf4511f2011-04-16 14:15:25 +0000912 dev->dev_addr, pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913
914 pci_set_drvdata(pdev, dev);
915
916 {
917 u16 mii_cmd;
918 int mii_status = mdio_read(dev, phy_id, 1);
919 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
920 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
921 if (mii_status != 0xffff && mii_status != 0x0000) {
922 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
Joe Perchesdf4511f2011-04-16 14:15:25 +0000923 netdev_info(dev,
924 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
925 phy_id,
926 mii_status, rp->mii_if.advertising,
927 mdio_read(dev, phy_id, 5));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928
929 /* set IFF_RUNNING */
930 if (mii_status & BMSR_LSTATUS)
931 netif_carrier_on(dev);
932 else
933 netif_carrier_off(dev);
934
935 }
936 }
937 rp->mii_if.phy_id = phy_id;
Roger Luethib933b4d2006-08-14 23:00:21 -0700938 if (debug > 1 && avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +0000939 netdev_info(dev, "No D3 power state at shutdown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940
941 return 0;
942
943err_out_unmap:
944 pci_iounmap(pdev, ioaddr);
945err_out_free_res:
946 pci_release_regions(pdev);
947err_out_free_netdev:
948 free_netdev(dev);
949err_out:
950 return rc;
951}
952
953static int alloc_ring(struct net_device* dev)
954{
955 struct rhine_private *rp = netdev_priv(dev);
956 void *ring;
957 dma_addr_t ring_dma;
958
959 ring = pci_alloc_consistent(rp->pdev,
960 RX_RING_SIZE * sizeof(struct rx_desc) +
961 TX_RING_SIZE * sizeof(struct tx_desc),
962 &ring_dma);
963 if (!ring) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000964 netdev_err(dev, "Could not allocate DMA memory\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 return -ENOMEM;
966 }
967 if (rp->quirks & rqRhineI) {
968 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
969 PKT_BUF_SZ * TX_RING_SIZE,
970 &rp->tx_bufs_dma);
971 if (rp->tx_bufs == NULL) {
972 pci_free_consistent(rp->pdev,
973 RX_RING_SIZE * sizeof(struct rx_desc) +
974 TX_RING_SIZE * sizeof(struct tx_desc),
975 ring, ring_dma);
976 return -ENOMEM;
977 }
978 }
979
980 rp->rx_ring = ring;
981 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
982 rp->rx_ring_dma = ring_dma;
983 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
984
985 return 0;
986}
987
988static void free_ring(struct net_device* dev)
989{
990 struct rhine_private *rp = netdev_priv(dev);
991
992 pci_free_consistent(rp->pdev,
993 RX_RING_SIZE * sizeof(struct rx_desc) +
994 TX_RING_SIZE * sizeof(struct tx_desc),
995 rp->rx_ring, rp->rx_ring_dma);
996 rp->tx_ring = NULL;
997
998 if (rp->tx_bufs)
999 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1000 rp->tx_bufs, rp->tx_bufs_dma);
1001
1002 rp->tx_bufs = NULL;
1003
1004}
1005
1006static void alloc_rbufs(struct net_device *dev)
1007{
1008 struct rhine_private *rp = netdev_priv(dev);
1009 dma_addr_t next;
1010 int i;
1011
1012 rp->dirty_rx = rp->cur_rx = 0;
1013
1014 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1015 rp->rx_head_desc = &rp->rx_ring[0];
1016 next = rp->rx_ring_dma;
1017
1018 /* Init the ring entries */
1019 for (i = 0; i < RX_RING_SIZE; i++) {
1020 rp->rx_ring[i].rx_status = 0;
1021 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1022 next += sizeof(struct rx_desc);
1023 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1024 rp->rx_skbuff[i] = NULL;
1025 }
1026 /* Mark the last entry as wrapping the ring. */
1027 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1028
1029 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1030 for (i = 0; i < RX_RING_SIZE; i++) {
Kevin Lob26b5552008-08-27 11:35:09 +08001031 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 rp->rx_skbuff[i] = skb;
1033 if (skb == NULL)
1034 break;
1035 skb->dev = dev; /* Mark as being used by this device. */
1036
1037 rp->rx_skbuff_dma[i] =
David S. Miller689be432005-06-28 15:25:31 -07001038 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039 PCI_DMA_FROMDEVICE);
1040
1041 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1042 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1043 }
1044 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1045}
1046
1047static void free_rbufs(struct net_device* dev)
1048{
1049 struct rhine_private *rp = netdev_priv(dev);
1050 int i;
1051
1052 /* Free all the skbuffs in the Rx queue. */
1053 for (i = 0; i < RX_RING_SIZE; i++) {
1054 rp->rx_ring[i].rx_status = 0;
1055 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1056 if (rp->rx_skbuff[i]) {
1057 pci_unmap_single(rp->pdev,
1058 rp->rx_skbuff_dma[i],
1059 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1060 dev_kfree_skb(rp->rx_skbuff[i]);
1061 }
1062 rp->rx_skbuff[i] = NULL;
1063 }
1064}
1065
1066static void alloc_tbufs(struct net_device* dev)
1067{
1068 struct rhine_private *rp = netdev_priv(dev);
1069 dma_addr_t next;
1070 int i;
1071
1072 rp->dirty_tx = rp->cur_tx = 0;
1073 next = rp->tx_ring_dma;
1074 for (i = 0; i < TX_RING_SIZE; i++) {
1075 rp->tx_skbuff[i] = NULL;
1076 rp->tx_ring[i].tx_status = 0;
1077 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1078 next += sizeof(struct tx_desc);
1079 rp->tx_ring[i].next_desc = cpu_to_le32(next);
Roger Luethi4be5de22006-04-04 20:49:16 +02001080 if (rp->quirks & rqRhineI)
1081 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 }
1083 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1084
1085}
1086
1087static void free_tbufs(struct net_device* dev)
1088{
1089 struct rhine_private *rp = netdev_priv(dev);
1090 int i;
1091
1092 for (i = 0; i < TX_RING_SIZE; i++) {
1093 rp->tx_ring[i].tx_status = 0;
1094 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1095 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1096 if (rp->tx_skbuff[i]) {
1097 if (rp->tx_skbuff_dma[i]) {
1098 pci_unmap_single(rp->pdev,
1099 rp->tx_skbuff_dma[i],
1100 rp->tx_skbuff[i]->len,
1101 PCI_DMA_TODEVICE);
1102 }
1103 dev_kfree_skb(rp->tx_skbuff[i]);
1104 }
1105 rp->tx_skbuff[i] = NULL;
1106 rp->tx_buf[i] = NULL;
1107 }
1108}
1109
1110static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1111{
1112 struct rhine_private *rp = netdev_priv(dev);
1113 void __iomem *ioaddr = rp->base;
1114
1115 mii_check_media(&rp->mii_if, debug, init_media);
1116
1117 if (rp->mii_if.full_duplex)
1118 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1119 ioaddr + ChipCmd1);
1120 else
1121 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1122 ioaddr + ChipCmd1);
Roger Luethi00b428c2006-03-28 20:53:56 +02001123 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001124 netdev_info(dev, "force_media %d, carrier %d\n",
1125 rp->mii_if.force_media, netif_carrier_ok(dev));
Roger Luethi00b428c2006-03-28 20:53:56 +02001126}
1127
1128/* Called after status of force_media possibly changed */
Adrian Bunk0761be42006-04-10 23:22:21 -07001129static void rhine_set_carrier(struct mii_if_info *mii)
Roger Luethi00b428c2006-03-28 20:53:56 +02001130{
1131 if (mii->force_media) {
1132 /* autoneg is off: Link is always assumed to be up */
1133 if (!netif_carrier_ok(mii->dev))
1134 netif_carrier_on(mii->dev);
1135 }
1136 else /* Let MMI library update carrier status */
1137 rhine_check_media(mii->dev, 0);
1138 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001139 netdev_info(mii->dev, "force_media %d, carrier %d\n",
1140 mii->force_media, netif_carrier_ok(mii->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141}
1142
/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 *
 * The MMIO write order (enable CAM, select index, load bytes, commit,
 * disable) and the wmb()/udelay() pairs are deliberate; do not reorder.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	/* Select which CAM entry the following data bytes target. */
	iowrite8((u8) idx, ioaddr + CamAddr);

	/* Stage the 6 address bytes in the MulticastFilter window. */
	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);	/* settle time before commit -- TODO confirm vs. datasheet */
	wmb();

	/* Commit: write strobe with CAM still enabled. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1173
/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 *
 * Same commit protocol as rhine_set_cam(), but with CAMC_VCAMSL selecting
 * the VLAN CAM bank and a 16-bit payload. Write order is deliberate.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	/* Select the target VLAN CAM entry. */
	iowrite8((u8) idx, ioaddr + CamAddr);

	/* VLAN id is staged at offset 6 of the MulticastFilter window. */
	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	/* Commit the staged entry. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1201
/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 * Each bit of @mask enables (1) or disables (0) one CAM entry.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1220
/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 * Identical to rhine_set_cam_mask() except CAMC_VCAMSL selects the
 * VLAN CAM bank.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1239
/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	/* NOTE(review): TCR_PQEN is switched ON while BCR1_VIDFR is
	 * switched OFF -- the mixed polarity looks intentional but should
	 * be confirmed against the VT6105M datasheet. */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}
1260
1261/**
1262 * rhine_update_vcam - update VLAN CAM filters
1263 * @rp: rhine_private data of this Rhine
1264 *
1265 * Update VLAN CAM filters to match configuration change.
1266 */
1267static void rhine_update_vcam(struct net_device *dev)
1268{
1269 struct rhine_private *rp = netdev_priv(dev);
1270 void __iomem *ioaddr = rp->base;
1271 u16 vid;
1272 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1273 unsigned int i = 0;
1274
1275 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1276 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1277 vCAMmask |= 1 << i;
1278 if (++i >= VCAM_SIZE)
1279 break;
1280 }
1281 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1282}
1283
Jiri Pirko8e586132011-12-08 19:52:37 -05001284static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001285{
1286 struct rhine_private *rp = netdev_priv(dev);
1287
1288 spin_lock_irq(&rp->lock);
1289 set_bit(vid, rp->active_vlans);
1290 rhine_update_vcam(dev);
1291 spin_unlock_irq(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001292 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001293}
1294
Jiri Pirko8e586132011-12-08 19:52:37 -05001295static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001296{
1297 struct rhine_private *rp = netdev_priv(dev);
1298
1299 spin_lock_irq(&rp->lock);
1300 clear_bit(vid, rp->active_vlans);
1301 rhine_update_vcam(dev);
1302 spin_unlock_irq(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001303 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001304}
1305
/*
 * Program the chip into operational state: station address, FIFO
 * thresholds, ring base addresses, Rx filter, CAM filters (6105M+),
 * NAPI, interrupt mask, and finally the start command. Called after a
 * chip reset (rhine_open / rhine_reset_task); the ordering matters.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* Station (MAC) address, one byte register at a time. */
	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	/* Descriptor ring bases set up by alloc_ring(). */
	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Hardware VLAN/multicast CAM exists on 6105M and later only. */
	if (rp->pdev->revision >= VT6105M)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	/* Start the chip; then sync duplex/carrier with the PHY. */
	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1343
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	/* Stop any in-progress MII command, point the poller at BMSR,
	 * then start auto-polling (0x80). Bit meanings per VIA hardware;
	 * 0x20/0x40 semantics should be confirmed against the datasheet. */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Busy-wait until the chip reports polling active. */
	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1355
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs an extra dance to quiesce the poller. */
	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		/* Later chips: just wait for the poller-idle flag. */
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
1378
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read PHY register @regnum of PHY @phy_id. Auto-polling must be off
 * while we drive the MDIO state machine by hand, so the access is
 * bracketed by rhine_disable_linkmon()/rhine_enable_linkmon().
 * Returns the 16-bit register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* Busy-wait for the read-strobe bit to self-clear. */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
1399
/*
 * Write @value to PHY register @regnum of PHY @phy_id. Mirror image of
 * mdio_read(): link auto-polling is suspended around the hand-driven
 * MDIO transaction.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	/* Busy-wait for the write-strobe bit to self-clear. */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
1416
/*
 * ndo_open: claim the (shared) IRQ, allocate and populate both rings,
 * reset and program the chip, then start the Tx queue.
 * Returns 0 or a negative errno; the IRQ is released if ring allocation
 * fails.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	/* Fresh chip state, then full register programming. */
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
			   __func__, ioread16(ioaddr + ChipCmd),
			   mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1449
/*
 * Deferred-work handler (scheduled by rhine_tx_timeout): rebuild both
 * descriptor rings and reinitialize the chip, with the device IRQ and
 * NAPI quiesced and rp->lock held around the ring/register surgery.
 */
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	napi_disable(&rp->napi);

	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1480
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001481static void rhine_tx_timeout(struct net_device *dev)
1482{
1483 struct rhine_private *rp = netdev_priv(dev);
1484 void __iomem *ioaddr = rp->base;
1485
Joe Perchesdf4511f2011-04-16 14:15:25 +00001486 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1487 ioread16(ioaddr + IntrStatus),
1488 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001489
1490 schedule_work(&rp->reset_task);
1491}
1492
/*
 * ndo_start_xmit: queue one frame on the Tx ring.
 *
 * Rhine-I cannot DMA frames that are unaligned, fragmented, or need a
 * checksum; those are copied into a per-slot bounce buffer instead.
 * The descriptor is handed to the NIC (DescOwn) only after all other
 * fields are written, fenced by wmb() -- do not reorder.
 * Always returns NETDEV_TX_OK (oversized Rhine-I frames are dropped).
 */
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;
	unsigned long flags;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* skb_padto frees the skb itself on failure. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* Zero DMA handle marks the bounce-buffer path for
		 * free_tbufs()/rhine_tx(). */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* VLAN tag goes in the upper 16 bits of tx_status. */
		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	/* lock eth irq */
	spin_lock_irqsave(&rp->lock, flags);
	wmb();
	/* Ownership transfer to the NIC -- must be the last field write. */
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (vlan_tx_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	/* Ring full: stop the queue until rhine_tx() frees slots. */
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&rp->lock, flags);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   rp->cur_tx-1, entry);
	}
	return NETDEV_TX_OK;
}
1580
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/*
 * Shared-IRQ handler. Loops while the chip reports events (bounded by
 * max_interrupt_work): Rx events re-arm the interrupt mask WITHOUT the
 * Rx bits and hand Rx processing to NAPI; Tx completions are scavenged
 * inline; abnormal events go to rhine_error().
 */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = rhine_get_events(rp))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		rhine_ack_events(rp, intr_status);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %08x\n",
				   intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
			/* Mask Rx sources; NAPI poll will re-enable them. */
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			napi_schedule(&rp->napi);
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					netdev_warn(dev,
						    "%s: Tx engine still on\n",
						    __func__);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		/* Guard against interrupt livelock. */
		if (--boguscnt < 0) {
			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
				    intr_status);
			break;
		}
	}

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
			   ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1643
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. Reclaims completed Tx descriptors from dirty_tx up to
   cur_tx, accounts errors/collisions and frees the transmitted skbs. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			netdev_dbg(dev, "Tx scavenge %d status %08x\n",
				   entry, txstatus);
		/* Chip still owns this descriptor: nothing more completed. */
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {	/* error summary bit */
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* FIFO error: give the descriptor back to the
				 * chip and stop — the skb is kept so this
				 * frame is retried after the Tx restart. */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Rhine I keeps the collision count in bits 3-6,
			 * later chips in bits 0-3. */
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
					   (txstatus >> 3) & 0xF,
					   txstatus & 0xF);
			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			dev->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	/* Wake the queue once enough ring slots have been reclaimed. */
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
1709
Roger Luethi38f49e82010-12-06 00:59:40 +00001710/**
1711 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1712 * @skb: pointer to sk_buff
1713 * @data_size: used data area of the buffer including CRC
1714 *
1715 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1716 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1717 * aligned following the CRC.
1718 */
1719static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1720{
1721 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
Harvey Harrison4562b2f2011-03-28 17:08:59 +00001722 return be16_to_cpup((__be16 *)trailer);
Roger Luethi38f49e82010-12-06 00:59:40 +00001723}
1724
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		netdev_dbg(dev, "%s(), entry %d status %08x\n",
			   __func__, entry,
			   le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;	/* length incl. CRC */

		/* Chip still owns this descriptor: no more completed frames. */
		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			netdev_dbg(dev, "%s() status is %08x\n",
				   __func__, desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* NOTE(review): presumably a frame larger than
				 * the Rx buffer spilled into a second
				 * descriptor — counted as a length error. */
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				if (debug > 2)
					netdev_dbg(dev, "%s() Rx error was %08x\n",
						   __func__, desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				/* Copy path: the ring skb stays mapped, only
				 * sync it for the CPU around the copy. */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring skb itself upstream; the empty
				 * slot is refilled in the loop below. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}

			/* Must read the tag before eth_type_trans() adjusts
			 * the skb, using the full length including CRC. */
			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, vlan_tci);
			netif_receive_skb(skb);
			dev->stats.rx_bytes += pkt_len;
			dev->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		/* Hand the descriptor (back) to the chip. */
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
1857
/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);	/* read-to-clear; value discarded */
	ioread16(ioaddr + RxMissed);	/* read-to-clear; value discarded */
}
1870
/* Restart transmission after a Tx error, resuming at the first
 * not-yet-reclaimed descriptor (dirty_tx). */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			netdev_warn(dev, "%s() Another error occurred %08x\n",
				    __func__, intr_status);
	}

}
1908
/* Abnormal error summary / uncommon event handler, called from the
 * interrupt handler with the raw interrupt status word. */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Hardware tally counters near overflow: fold into dev->stats. */
		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			netdev_info(dev, "Abort %08x, frame dropped\n",
				    intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Bump the Tx threshold (logged below) to curb underruns. */
		rhine_kick_tx_threshold(rp);
		if (debug > 1)
			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			netdev_info(dev, "Tx descriptor write-back race\n");
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		/* Generic Tx error with no specific cause bit set. */
		rhine_kick_tx_threshold(rp);
		if (debug > 1)
			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	/* Any Tx-side error requires a transmitter restart. */
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Complain about status bits no handler above accounts for. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			netdev_err(dev, "Something Wicked happened! %08x\n",
				   intr_status);
	}

	spin_unlock(&rp->lock);
}
1960
/* ndo_get_stats: fold the hardware tally counters into dev->stats under
 * the lock and return the accumulated statistics. */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	/* Counters are cleared after reading; see clear_tally_counters(). */
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &dev->stats;
}
1975
/* Program the receive filter from dev->flags and the multicast list:
 * promiscuous, accept-all-multicast, per-address CAM (VT6105M+), or
 * 64-bit multicast hash (older chips). */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->pdev->revision >= VT6105M) {
		/* Exact-match CAM entries, one per multicast address. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Older chips: CRC-based 64-bit multicast hash filter. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->pdev->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2023
2024static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2025{
2026 struct rhine_private *rp = netdev_priv(dev);
2027
Rick Jones23020ab2011-11-09 09:58:07 +00002028 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2029 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2030 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031}
2032
2033static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2034{
2035 struct rhine_private *rp = netdev_priv(dev);
2036 int rc;
2037
2038 spin_lock_irq(&rp->lock);
2039 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2040 spin_unlock_irq(&rp->lock);
2041
2042 return rc;
2043}
2044
2045static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2046{
2047 struct rhine_private *rp = netdev_priv(dev);
2048 int rc;
2049
2050 spin_lock_irq(&rp->lock);
2051 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2052 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002053 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
2055 return rc;
2056}
2057
2058static int netdev_nway_reset(struct net_device *dev)
2059{
2060 struct rhine_private *rp = netdev_priv(dev);
2061
2062 return mii_nway_restart(&rp->mii_if);
2063}
2064
2065static u32 netdev_get_link(struct net_device *dev)
2066{
2067 struct rhine_private *rp = netdev_priv(dev);
2068
2069 return mii_link_ok(&rp->mii_if);
2070}
2071
/* ethtool get_msglevel: report the driver-wide debug level
 * (the module parameter "debug"; not per-device). */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
2076
/* ethtool set_msglevel: set the driver-wide debug level — affects all
 * rhine devices, not just @dev. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
2081
2082static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2083{
2084 struct rhine_private *rp = netdev_priv(dev);
2085
2086 if (!(rp->quirks & rqWOL))
2087 return;
2088
2089 spin_lock_irq(&rp->lock);
2090 wol->supported = WAKE_PHY | WAKE_MAGIC |
2091 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2092 wol->wolopts = rp->wolopts;
2093 spin_unlock_irq(&rp->lock);
2094}
2095
2096static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2097{
2098 struct rhine_private *rp = netdev_priv(dev);
2099 u32 support = WAKE_PHY | WAKE_MAGIC |
2100 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2101
2102 if (!(rp->quirks & rqWOL))
2103 return -EINVAL;
2104
2105 if (wol->wolopts & ~support)
2106 return -EINVAL;
2107
2108 spin_lock_irq(&rp->lock);
2109 rp->wolopts = wol->wolopts;
2110 spin_unlock_irq(&rp->lock);
2111
2112 return 0;
2113}
2114
/* ethtool entry points: MII-backed link settings plus WOL get/set. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_wol = rhine_get_wol,
	.set_wol = rhine_set_wol,
};
2126
2127static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2128{
2129 struct rhine_private *rp = netdev_priv(dev);
2130 int rc;
2131
2132 if (!netif_running(dev))
2133 return -EINVAL;
2134
2135 spin_lock_irq(&rp->lock);
2136 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2137 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002138 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
2140 return rc;
2141}
2142
/* ndo_stop: quiesce NAPI and the reset worker, silence the chip under
 * the lock, then release the IRQ and all ring/buffer memory. */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	napi_disable(&rp->napi);
	cancel_work_sync(&rp->reset_task);
	netif_stop_queue(dev);

	spin_lock_irq(&rp->lock);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
			   ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	/* Only after the chip is stopped is it safe to free everything. */
	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2176
2177
/* PCI remove callback: unregister the netdev first so no new traffic
 * arrives, then release I/O mappings, regions and the device itself. */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2192
/* Arm Wake-on-LAN as configured via ethtool (rp->wolopts) and put the
 * chip into D3. Used both as the PCI .shutdown hook and from suspend. */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) -- skipped on BIOSes that break in D3
	 * (see the avoid_D3 module parameter / DMI table). */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}
2239
2240#ifdef CONFIG_PM
/* Legacy PCI suspend hook: stop NAPI, detach the interface, arm WOL and
 * power down via rhine_shutdown(), then release the IRQ. */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	napi_disable(&rp->napi);

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
2262
/* Legacy PCI resume hook: reacquire the IRQ, restore power state and PCI
 * config, rebuild both descriptor rings and reattach the interface. */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Failure is only logged; resume continues regardless. */
	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		netdev_err(dev, "request_irq failed\n");

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		netdev_info(dev, "Entering power state D0 %s (%d)\n",
			    ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Discard any stale buffers and rebuild the rings from scratch. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
2299#endif /* CONFIG_PM */
2300
/* PCI driver glue; .shutdown also arms Wake-on-LAN at power-off. */
static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};
2312
/* Boards whose BIOS fails at PXE boot if the chip is left in D3;
 * matched in rhine_init() to force avoid_D3. */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }	/* terminator */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
2331static int __init rhine_init(void)
2332{
2333/* when a module, this is printed whether or not devices are found in probe */
2334#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002335 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336#endif
Roger Luethie84df482007-03-06 19:57:37 +01002337 if (dmi_check_system(rhine_dmi_table)) {
2338 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002339 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002340 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002341 }
2342 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002343 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002344
Jeff Garzik29917622006-08-19 17:48:59 -04002345 return pci_register_driver(&rhine_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346}
2347
2348
/* Module exit: unregister the PCI driver; per-device teardown happens in
 * rhine_remove_one() as each bound device is released. */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2353
2354
2355module_init(rhine_init);
2356module_exit(rhine_cleanup);