blob: 93987e84be18bd9ebb3eb0f65a5a36a427d10117 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi38f49e82010-12-06 00:59:40 +000035#define DRV_VERSION "1.5.0"
36#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
42
Joe Perchesdf4511f2011-04-16 14:15:25 +000043#define DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -070044static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
45static int max_interrupt_work = 20;
46
47/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
48 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000049#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
50 defined(CONFIG_SPARC) || defined(__ia64__) || \
51 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070052static int rx_copybreak = 1518;
53#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070054static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070055#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Roger Luethib933b4d2006-08-14 23:00:21 -070057/* Work-around for broken BIOSes: they are unable to get the chip back out of
58 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000059static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/*
62 * In case you are looking for 'options[]' or 'full_duplex[]', they
63 * are gone. Use ethtool(8) instead.
64 */
65
66/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
67 The Rhine has a 64 element 8390-like hash table. */
68static const int multicast_filter_limit = 32;
69
70
71/* Operational parameters that are set at compile time. */
72
73/* Keep the ring sizes a power of two for compile efficiency.
74 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
75 Making the Tx ring too large decreases the effectiveness of channel
76 bonding and packet priority.
77 There are no ill effects from too-large receive rings. */
78#define TX_RING_SIZE 16
79#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070080#define RX_RING_SIZE 64
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
82/* Operational parameters that usually are not changed. */
83
84/* Time in jiffies before concluding the transmitter is hung. */
85#define TX_TIMEOUT (2*HZ)
86
87#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
88
89#include <linux/module.h>
90#include <linux/moduleparam.h>
91#include <linux/kernel.h>
92#include <linux/string.h>
93#include <linux/timer.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <linux/interrupt.h>
97#include <linux/pci.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -040098#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070099#include <linux/netdevice.h>
100#include <linux/etherdevice.h>
101#include <linux/skbuff.h>
102#include <linux/init.h>
103#include <linux/delay.h>
104#include <linux/mii.h>
105#include <linux/ethtool.h>
106#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000107#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800109#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110#include <asm/processor.h> /* Processor type for cache alignment. */
111#include <asm/io.h>
112#include <asm/irq.h>
113#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100114#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
116/* These identify the driver base version and may not be removed. */
Stephen Hemmingerc8de1fc2009-02-26 10:19:31 +0000117static const char version[] __devinitconst =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000118 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
120/* This driver was written to use PCI memory space. Some early versions
121 of the Rhine may only work correctly with I/O space accesses. */
122#ifdef CONFIG_VIA_RHINE_MMIO
123#define USE_MMIO
124#else
125#endif
126
127MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129MODULE_LICENSE("GPL");
130
131module_param(max_interrupt_work, int, 0);
132module_param(debug, int, 0);
133module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700134module_param(avoid_D3, bool, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700138MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
Roger Luethi38f49e82010-12-06 00:59:40 +0000140#define MCAM_SIZE 32
141#define VCAM_SIZE 32
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143/*
144 Theory of Operation
145
146I. Board Compatibility
147
148This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
149controller.
150
151II. Board-specific settings
152
153Boards with this chip are functional only in a bus-master PCI slot.
154
155Many operational settings are loaded from the EEPROM to the Config word at
156offset 0x78. For most of these settings, this driver assumes that they are
157correct.
158If this driver is compiled to use PCI memory space operations the EEPROM
159must be configured to enable memory ops.
160
161III. Driver operation
162
163IIIa. Ring buffers
164
165This driver uses two statically allocated fixed-size descriptor lists
166formed into rings by a branch from the final descriptor to the beginning of
167the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
168
169IIIb/c. Transmit/Receive Structure
170
171This driver attempts to use a zero-copy receive and transmit scheme.
172
173Alas, all data buffers are required to start on a 32 bit boundary, so
174the driver must often copy transmit packets into bounce buffers.
175
176The driver allocates full frame size skbuffs for the Rx ring buffers at
177open() time and passes the skb->data field to the chip as receive data
178buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
179a fresh skbuff is allocated and the frame is copied to the new skbuff.
180When the incoming frame is larger, the skbuff is passed directly up the
181protocol stack. Buffers consumed this way are replaced by newly allocated
182skbuffs in the last phase of rhine_rx().
183
184The RX_COPYBREAK value is chosen to trade-off the memory wasted by
185using a full-sized skbuff for small frames vs. the copying costs of larger
186frames. New boards are typically used in generously configured machines
187and the underfilled buffers have negligible impact compared to the benefit of
188a single allocation size, so the default value of zero results in never
189copying packets. When copying is done, the cost is usually mitigated by using
190a combined copy/checksum routine. Copying also preloads the cache, which is
191most useful with small frames.
192
193Since the VIA chips are only able to transfer data to buffers on 32 bit
194boundaries, the IP header at offset 14 in an ethernet frame isn't
195longword aligned for further processing. Copying these unaligned buffers
196has the beneficial effect of 16-byte aligning the IP header.
197
198IIId. Synchronization
199
200The driver runs as two independent, single-threaded flows of control. One
201is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800202netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
203which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
205The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800206netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
207the ring is not available it stops the transmit queue by
208calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210The interrupt handler has exclusive control over the Rx ring and records stats
211from the Tx ring. After reaping the stats, it marks the Tx queue entry as
212empty by incrementing the dirty_tx mark. If at least half of the entries in
213the Rx ring are available the transmit queue is woken up if it was stopped.
214
215IV. Notes
216
217IVb. References
218
219Preliminary VT86C100A manual from http://www.via.com.tw/
220http://www.scyld.com/expert/100mbps.html
221http://www.scyld.com/expert/NWay.html
222ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
223ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
224
225
226IVc. Errata
227
228The VT86C100A manual is not reliable information.
229The 3043 chip does not handle unaligned transmit or receive buffers, resulting
230in significant performance degradation for bounce buffer copies on transmit
231and unaligned IP headers on receive.
232The chip does not pad to minimum transmit length.
233
234*/
235
236
237/* This table drives the PCI probe routines. It's mostly boilerplate in all
238 of the drivers, and will likely be provided by some future kernel.
 Note the matching code -- the first table entry matches all 56** cards but
240 second only the 1234 card.
241*/
242
/*
 * Chip generations as encoded in the PCI revision ID; the probe code
 * (rhine_init_one) compares pdev->revision against these to pick a name
 * and a quirk set.
 */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

/* Per-revision feature/bug flags, OR-ed into rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */
272
/*
 * Beware of PCI posted writes: read back a harmless register to flush
 * them. Expects a local "ioaddr" pointing at the mapped register window.
 */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)

/* Supported devices — all VIA (vendor 0x1106) Rhine variants. */
static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284
285
/* Offsets to the device registers (byte offsets into the BAR window). */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD (transmit backoff algorithm tuning) */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
309
/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register — controls the MC/VLAN CAM filter */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/*
 * Bits in the PCIBusConfig1 (BCR1) register.
 * Note the bit assignments for 0x40/0x80 differ between VT6102 and
 * VT6105 — callers must key off the chip revision.
 */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};
343
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/*
 * Bits in the interrupt status/mask registers.
 * Values above 0xFFFF are synthesized by get_intr_status(), which shifts
 * IntrStatus2 into bits 16+ on chips with the rqStatusWBRace quirk.
 */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
374
/*
 * The Rx and Tx buffer descriptors, shared with the chip via DMA.
 * All fields are little-endian (__le32) as seen by the hardware.
 */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000	/* set while the chip owns the descriptor */
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000	/* frame carries a VLAN tag */
};

/* Bits in ChipCmd (Cmd1* constants apply to the ChipCmd1 register). */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
413
/* Per-adapter driver state, allocated with the net_device. */
struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings; *_dma hold the bus addresses given to the chip */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;		/* underlying PCI device */
	long pioaddr;			/* I/O port base (PCI BAR 0) */
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;		/* serializes Tx path vs. irq handler */
	struct work_struct reset_task;	/* deferred chip reset (rhine_reset_task) */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;			/* rhine_quirks flags for this revision */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;			/* enabled Wake-on-LAN modes */

	/* FIFO thresholds; tx_thresh is written to TxConfig when bumped */
	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;		/* mapped register window (pci_iomap) */
};
457
/*
 * Read-modify-write helpers for 8/16/32-bit registers at iomem address p:
 *   ..._ON(x, p)      set the bits in x
 *   ..._OFF(x, p)     clear the bits in x
 *   ..._SET(x, m, p)  replace the bits selected by mask m with x
 *   ..._IS_ON(x, p)   test whether any bit in x is set
 * Note: each macro performs a full read plus write, so they are not
 * atomic with respect to concurrent register access.
 */
#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
473
474
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475static int mdio_read(struct net_device *dev, int phy_id, int location);
476static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
477static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800478static void rhine_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000480static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
481 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100482static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700484static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485static void rhine_error(struct net_device *dev, int intr_status);
486static void rhine_set_rx_mode(struct net_device *dev);
487static struct net_device_stats *rhine_get_stats(struct net_device *dev);
488static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400489static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490static int rhine_close(struct net_device *dev);
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -0700491static void rhine_shutdown (struct pci_dev *pdev);
Jiri Pirko8e586132011-12-08 19:52:37 -0500492static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
493static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
Roger Luethi38f49e82010-12-06 00:59:40 +0000494static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
495static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
496static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
497static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
498static void rhine_init_cam_filter(struct net_device *dev);
499static void rhine_update_vcam(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500
/*
 * Busy-wait (up to 1024 polls, no delay between them) until "condition"
 * becomes true; gives up silently if the budget is exhausted. At debug
 * level > 1, report when more than half the budget was consumed.
 */
#define RHINE_WAIT_FOR(condition)				\
do {								\
	int i = 1024;						\
	while (!(condition) && --i)				\
		;						\
	if (debug > 1 && i < 512)				\
		pr_info("%4d cycles used @ %s:%d\n",		\
			1024 - i, __func__, __LINE__);		\
} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510
511static inline u32 get_intr_status(struct net_device *dev)
512{
513 struct rhine_private *rp = netdev_priv(dev);
514 void __iomem *ioaddr = rp->base;
515 u32 intr_status;
516
517 intr_status = ioread16(ioaddr + IntrStatus);
518 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
519 if (rp->quirks & rqStatusWBRace)
520 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
521 return intr_status;
522}
523
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 *
 * Only chips with the rqWOL quirk have these registers; on others this
 * is a no-op. The write ordering below (D0 first, then clearing the
 * config/status bits) is kept as-is — it mirrors the hardware sequence.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits (read before clearing below) */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		/* Tell the user why the box woke up, if it was a WOL event */
		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}
583
584static void rhine_chip_reset(struct net_device *dev)
585{
586 struct rhine_private *rp = netdev_priv(dev);
587 void __iomem *ioaddr = rp->base;
588
589 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
590 IOSYNC;
591
592 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000593 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594
595 /* Force reset */
596 if (rp->quirks & rqForceReset)
597 iowrite8(0x40, ioaddr + MiscCmd);
598
599 /* Reset can take somewhat longer (rare) */
600 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
601 }
602
603 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +0000604 netdev_info(dev, "Reset %s\n",
605 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
606 "failed" : "succeeded");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607}
608
#ifdef USE_MMIO
/*
 * Enable memory-mapped register access via port I/O. Rhine-I uses a
 * different config register/bit than later chips.
 */
static void enable_mmio(long pioaddr, u32 quirks)
{
	int cfg;

	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		cfg = inb(pioaddr + ConfigA) | 0x20;
		outb(cfg, pioaddr + ConfigA);
	} else {
		cfg = inb(pioaddr + ConfigD) | 0x80;
		outb(cfg, pioaddr + ConfigD);
	}
}
#endif
623
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 *
 * Uses port I/O (not MMIO) because the reload itself clobbers the
 * MMIO-enable configuration bits.
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Trigger the autoload and wait for the chip to clear the bit */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
650
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: run the interrupt handler by hand with the IRQ
 * line masked, so netconsole and friends work without interrupts.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif
659
Francois Romieu269f3112011-12-30 14:43:54 +0100660static void rhine_kick_tx_threshold(struct rhine_private *rp)
661{
662 if (rp->tx_thresh < 0xe0) {
663 void __iomem *ioaddr = rp->base;
664
665 rp->tx_thresh += 0x20;
666 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
667 }
668}
669
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700670static int rhine_napipoll(struct napi_struct *napi, int budget)
Roger Luethi633949a2006-08-14 23:00:17 -0700671{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700672 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
673 struct net_device *dev = rp->dev;
Roger Luethi633949a2006-08-14 23:00:17 -0700674 void __iomem *ioaddr = rp->base;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700675 int work_done;
Roger Luethi633949a2006-08-14 23:00:17 -0700676
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700677 work_done = rhine_rx(dev, budget);
Roger Luethi633949a2006-08-14 23:00:17 -0700678
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700679 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -0800680 napi_complete(napi);
Roger Luethi633949a2006-08-14 23:00:17 -0700681
682 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
683 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
684 IntrTxDone | IntrTxError | IntrTxUnderrun |
685 IntrPCIErr | IntrStatsMax | IntrLinkChange,
686 ioaddr + IntrEnable);
Roger Luethi633949a2006-08-14 23:00:17 -0700687 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700688 return work_done;
Roger Luethi633949a2006-08-14 23:00:17 -0700689}
Roger Luethi633949a2006-08-14 23:00:17 -0700690
/*
 * One-time hardware bring-up at probe: soft-reset the chip, then reload
 * the EEPROM-controlled registers that the reset cleared.
 */
static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
705
/* net_device callbacks; attached to the device in rhine_init_one(). */
static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats		 = rhine_get_stats,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address 	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout 	 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
723
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724static int __devinit rhine_init_one(struct pci_dev *pdev,
725 const struct pci_device_id *ent)
726{
727 struct net_device *dev;
728 struct rhine_private *rp;
729 int i, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700730 u32 quirks;
731 long pioaddr;
732 long memaddr;
733 void __iomem *ioaddr;
734 int io_size, phy_id;
735 const char *name;
736#ifdef USE_MMIO
737 int bar = 1;
738#else
739 int bar = 0;
740#endif
741
742/* when built into the kernel, we only print version if device is found */
743#ifndef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +0000744 pr_info_once("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745#endif
746
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 io_size = 256;
748 phy_id = 0;
749 quirks = 0;
750 name = "Rhine";
Auke Kok44c10132007-06-08 15:46:36 -0700751 if (pdev->revision < VTunknown0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 quirks = rqRhineI;
753 io_size = 128;
754 }
Auke Kok44c10132007-06-08 15:46:36 -0700755 else if (pdev->revision >= VT6102) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 quirks = rqWOL | rqForceReset;
Auke Kok44c10132007-06-08 15:46:36 -0700757 if (pdev->revision < VT6105) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758 name = "Rhine II";
759 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
760 }
761 else {
762 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
Auke Kok44c10132007-06-08 15:46:36 -0700763 if (pdev->revision >= VT6105_B0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 quirks |= rq6patterns;
Auke Kok44c10132007-06-08 15:46:36 -0700765 if (pdev->revision < VT6105M)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 name = "Rhine III";
767 else
768 name = "Rhine III (Management Adapter)";
769 }
770 }
771
772 rc = pci_enable_device(pdev);
773 if (rc)
774 goto err_out;
775
776 /* this should always be supported */
Yang Hongyang284901a2009-04-06 19:01:15 -0700777 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 if (rc) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000779 dev_err(&pdev->dev,
780 "32-bit PCI DMA addresses not supported by the card!?\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 goto err_out;
782 }
783
784 /* sanity check */
785 if ((pci_resource_len(pdev, 0) < io_size) ||
786 (pci_resource_len(pdev, 1) < io_size)) {
787 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000788 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 goto err_out;
790 }
791
792 pioaddr = pci_resource_start(pdev, 0);
793 memaddr = pci_resource_start(pdev, 1);
794
795 pci_set_master(pdev);
796
797 dev = alloc_etherdev(sizeof(struct rhine_private));
798 if (!dev) {
799 rc = -ENOMEM;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000800 dev_err(&pdev->dev, "alloc_etherdev failed\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 goto err_out;
802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803 SET_NETDEV_DEV(dev, &pdev->dev);
804
805 rp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700806 rp->dev = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 rp->quirks = quirks;
808 rp->pioaddr = pioaddr;
809 rp->pdev = pdev;
810
811 rc = pci_request_regions(pdev, DRV_NAME);
812 if (rc)
813 goto err_out_free_netdev;
814
815 ioaddr = pci_iomap(pdev, bar, io_size);
816 if (!ioaddr) {
817 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000818 dev_err(&pdev->dev,
819 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
820 pci_name(pdev), io_size, memaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821 goto err_out_free_res;
822 }
823
824#ifdef USE_MMIO
825 enable_mmio(pioaddr, quirks);
826
827 /* Check that selected MMIO registers match the PIO ones */
828 i = 0;
829 while (mmio_verify_registers[i]) {
830 int reg = mmio_verify_registers[i++];
831 unsigned char a = inb(pioaddr+reg);
832 unsigned char b = readb(ioaddr+reg);
833 if (a != b) {
834 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000835 dev_err(&pdev->dev,
836 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
837 reg, a, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 goto err_out_unmap;
839 }
840 }
841#endif /* USE_MMIO */
842
843 dev->base_addr = (unsigned long)ioaddr;
844 rp->base = ioaddr;
845
846 /* Get chip registers into a sane state */
847 rhine_power_init(dev);
848 rhine_hw_init(dev, pioaddr);
849
850 for (i = 0; i < 6; i++)
851 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
852
Joe Perches482e3fe2011-04-16 14:15:26 +0000853 if (!is_valid_ether_addr(dev->dev_addr)) {
854 /* Report it and use a random ethernet address instead */
855 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
856 random_ether_addr(dev->dev_addr);
857 netdev_info(dev, "Using random MAC address: %pM\n",
858 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 }
Joe Perches482e3fe2011-04-16 14:15:26 +0000860 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
862 /* For Rhine-I/II, phy_id is loaded from EEPROM */
863 if (!phy_id)
864 phy_id = ioread8(ioaddr + 0x6C);
865
866 dev->irq = pdev->irq;
867
868 spin_lock_init(&rp->lock);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800869 INIT_WORK(&rp->reset_task, rhine_reset_task);
870
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 rp->mii_if.dev = dev;
872 rp->mii_if.mdio_read = mdio_read;
873 rp->mii_if.mdio_write = mdio_write;
874 rp->mii_if.phy_id_mask = 0x1f;
875 rp->mii_if.reg_num_mask = 0x1f;
876
877 /* The chip-specific entries in the device structure. */
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800878 dev->netdev_ops = &rhine_netdev_ops;
879 dev->ethtool_ops = &netdev_ethtool_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 dev->watchdog_timeo = TX_TIMEOUT;
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800881
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700882 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
Francois Romieu32b0f532008-07-11 00:30:14 +0200883
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884 if (rp->quirks & rqRhineI)
885 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
886
Roger Luethi38f49e82010-12-06 00:59:40 +0000887 if (pdev->revision >= VT6105M)
888 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
889 NETIF_F_HW_VLAN_FILTER;
890
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 /* dev->name not defined before register_netdev()! */
892 rc = register_netdev(dev);
893 if (rc)
894 goto err_out_unmap;
895
Joe Perchesdf4511f2011-04-16 14:15:25 +0000896 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
897 name,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898#ifdef USE_MMIO
Joe Perchesdf4511f2011-04-16 14:15:25 +0000899 memaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900#else
Joe Perchesdf4511f2011-04-16 14:15:25 +0000901 (long)ioaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902#endif
Joe Perchesdf4511f2011-04-16 14:15:25 +0000903 dev->dev_addr, pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
905 pci_set_drvdata(pdev, dev);
906
907 {
908 u16 mii_cmd;
909 int mii_status = mdio_read(dev, phy_id, 1);
910 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
911 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
912 if (mii_status != 0xffff && mii_status != 0x0000) {
913 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
Joe Perchesdf4511f2011-04-16 14:15:25 +0000914 netdev_info(dev,
915 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
916 phy_id,
917 mii_status, rp->mii_if.advertising,
918 mdio_read(dev, phy_id, 5));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919
920 /* set IFF_RUNNING */
921 if (mii_status & BMSR_LSTATUS)
922 netif_carrier_on(dev);
923 else
924 netif_carrier_off(dev);
925
926 }
927 }
928 rp->mii_if.phy_id = phy_id;
Roger Luethib933b4d2006-08-14 23:00:21 -0700929 if (debug > 1 && avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +0000930 netdev_info(dev, "No D3 power state at shutdown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931
932 return 0;
933
934err_out_unmap:
935 pci_iounmap(pdev, ioaddr);
936err_out_free_res:
937 pci_release_regions(pdev);
938err_out_free_netdev:
939 free_netdev(dev);
940err_out:
941 return rc;
942}
943
/*
 * Allocate the single DMA-coherent area holding both descriptor rings
 * (Rx descriptors first, Tx descriptors immediately after).  On Rhine-I
 * an additional contiguous buffer area (tx_bufs) is allocated, used as
 * the Tx alignment/bounce buffer (see rhine_start_tx).
 *
 * Returns 0 on success or -ENOMEM; on failure nothing remains allocated.
 */
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			/* Undo the ring allocation before failing. */
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	/* Rx ring at the start of the area, Tx ring right behind it. */
	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
978
/*
 * Free the DMA-coherent descriptor area from alloc_ring() (rx_ring and
 * tx_ring share one allocation, released in a single call) and, when
 * present, the Rhine-I Tx alignment buffers.
 */
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}
996
/*
 * Build the Rx descriptor ring: chain the descriptors into a circle,
 * then attach a freshly allocated skb to each entry and hand ownership
 * to the chip (DescOwn).  skb allocation failure is tolerated: the loop
 * simply stops, and dirty_rx records (as a negative offset) how many
 * entries are left without a buffer.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	/* Buffer size follows the MTU; default PKT_BUF_SZ up to 1500. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;                 /* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		/* Publish the buffer address before giving the chip ownership. */
		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
1037
1038static void free_rbufs(struct net_device* dev)
1039{
1040 struct rhine_private *rp = netdev_priv(dev);
1041 int i;
1042
1043 /* Free all the skbuffs in the Rx queue. */
1044 for (i = 0; i < RX_RING_SIZE; i++) {
1045 rp->rx_ring[i].rx_status = 0;
1046 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1047 if (rp->rx_skbuff[i]) {
1048 pci_unmap_single(rp->pdev,
1049 rp->rx_skbuff_dma[i],
1050 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1051 dev_kfree_skb(rp->rx_skbuff[i]);
1052 }
1053 rp->rx_skbuff[i] = NULL;
1054 }
1055}
1056
/*
 * Build the Tx descriptor ring: reset the ring indices, chain the
 * descriptors into a circle, and on Rhine-I point each entry at its
 * slot in the pre-allocated alignment buffer area (tx_bufs).
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Close the ring: the last descriptor points back to the first. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}
1077
1078static void free_tbufs(struct net_device* dev)
1079{
1080 struct rhine_private *rp = netdev_priv(dev);
1081 int i;
1082
1083 for (i = 0; i < TX_RING_SIZE; i++) {
1084 rp->tx_ring[i].tx_status = 0;
1085 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1086 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1087 if (rp->tx_skbuff[i]) {
1088 if (rp->tx_skbuff_dma[i]) {
1089 pci_unmap_single(rp->pdev,
1090 rp->tx_skbuff_dma[i],
1091 rp->tx_skbuff[i]->len,
1092 PCI_DMA_TODEVICE);
1093 }
1094 dev_kfree_skb(rp->tx_skbuff[i]);
1095 }
1096 rp->tx_skbuff[i] = NULL;
1097 rp->tx_buf[i] = NULL;
1098 }
1099}
1100
/*
 * Let the MII library refresh link/duplex state, then mirror the
 * resulting duplex setting into the chip's Cmd1FDuplex bit.
 * @init_media is forwarded to mii_check_media() (non-zero on the
 * first call after (re)initialization).
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	/* Read-modify-write ChipCmd1 to track the negotiated duplex. */
	if (rp->mii_if.full_duplex)
	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	else
	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	if (debug > 1)
		netdev_info(dev, "force_media %d, carrier %d\n",
			    rp->mii_if.force_media, netif_carrier_ok(dev));
}
1118
1119/* Called after status of force_media possibly changed */
Adrian Bunk0761be42006-04-10 23:22:21 -07001120static void rhine_set_carrier(struct mii_if_info *mii)
Roger Luethi00b428c2006-03-28 20:53:56 +02001121{
1122 if (mii->force_media) {
1123 /* autoneg is off: Link is always assumed to be up */
1124 if (!netif_carrier_ok(mii->dev))
1125 netif_carrier_on(mii->dev);
1126 }
1127 else /* Let MMI library update carrier status */
1128 rhine_check_media(mii->dev, 0);
1129 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001130 netdev_info(mii->dev, "force_media %d, carrier %d\n",
1131 mii->force_media, netif_carrier_ok(mii->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132}
1133
/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	/* Enable CAM access before selecting an entry. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	/* Write the 6 address bytes into the CAM data window. */
	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	/* Commit the entry, then turn CAM access back off. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1164
/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	/* Enable CAM access and select the VLAN CAM bank. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	/* VLAN id lives in the data window right after the 6 address bytes. */
	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	/* Commit the entry, then turn CAM access back off. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1192
/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask (one bit per CAM entry)
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* CAM access must be enabled while writing the mask. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1211
/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask (one bit per VCAM entry)
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* Enable CAM access with the VLAN bank selected. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1230
/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}
1251
/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device whose active_vlans bitmap drives the filters
 *
 * Update VLAN CAM filters to match configuration change.  Only the
 * first VCAM_SIZE active VLAN ids fit in hardware; any beyond that
 * are silently not programmed.
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	/* Activate exactly the entries that were just programmed. */
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
1274
/*
 * net_device_ops hook: mark @vid active and reprogram the hardware
 * VLAN CAM filters.  Always returns 0.
 */
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_irq(&rp->lock);
	return 0;
}
1285
/*
 * net_device_ops hook: mark @vid inactive and reprogram the hardware
 * VLAN CAM filters.  Always returns 0.
 */
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_irq(&rp->lock);
	return 0;
}
1296
/*
 * Program the chip for operation: station address, FIFO thresholds,
 * descriptor ring base addresses, Rx mode, CAM filters (6105M and
 * later), NAPI and the interrupt mask; finally start the Tx/Rx engines
 * and do an initial media check.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Hardware VLAN/multicast CAM exists only on 6105M and later. */
	if (rp->pdev->revision >= VT6105M)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1334
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	/* Clear any pending MII command, select BMSR, start polling. */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Wait for MIIRegAddr bit 0x20 before arming auto-polling. */
	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1346
/* Disable MII link status auto-polling (required for MDIO access) */
/* Rhine-I needs the quirky 0x80 pulse sequence below; later chips
   just wait for the poll-stop bit (0x80) in MIIRegAddr. */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
1369
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read one PHY register.  Link auto-polling must be paused around the
 * access (rhine_disable_linkmon) and is re-enabled before returning.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* Busy-wait until the chip clears the read-trigger bit. */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
1390
/*
 * Write one PHY register.  Mirrors mdio_read(): link auto-polling is
 * paused for the duration of the access.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	/* Busy-wait until the chip clears the write-trigger bit. */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
1407
/*
 * net_device_ops open hook: request the (shared) IRQ, allocate and
 * populate both descriptor rings, reset and program the chip, then
 * start the transmit queue.  Returns 0 or a negative errno (the IRQ
 * is released again if ring allocation fails).
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
			   __func__, ioread16(ioaddr + ChipCmd),
			   mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1440
/*
 * Workqueue handler scheduled from rhine_tx_timeout(): with the device
 * IRQ and NAPI disabled and rp->lock held, tear down and rebuild both
 * descriptor rings, reset and reprogram the chip, then restart the
 * transmit queue.
 */
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	napi_disable(&rp->napi);

	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1471
/*
 * net_device_ops tx_timeout hook: log chip and PHY status, then defer
 * the actual recovery to the rhine_reset_task workqueue handler.
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}
1483
/*
 * net_device_ops start_xmit hook: queue one skb on the Tx ring.
 *
 * Rhine-I cannot transmit misaligned, fragmented or checksum-offloaded
 * data directly, so such packets are copied into the per-slot alignment
 * buffer (tx_buf) instead of being DMA-mapped; oversized packets on
 * that path are dropped.  Ownership (DescOwn) is handed to the chip
 * last, between memory barriers, then the Tx engine is poked.
 * Always returns NETDEV_TX_OK.
 */
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;
	unsigned long flags;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* No DMA mapping on this path; mark it with 0 so
		   free_tbufs()/rhine_tx() skip the unmap. */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* VLAN tag goes into the upper 16 status bits. */
		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	/* lock eth irq */
	spin_lock_irqsave(&rp->lock, flags);
	wmb();
	/* Hand the descriptor to the chip only after all fields are set. */
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (vlan_tx_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	/* Stop the queue when the ring has TX_QUEUE_LEN packets in flight. */
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&rp->lock, flags);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   rp->cur_tx-1, entry);
	}
	return NETDEV_TX_OK;
}
1571
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/*
 * Loop while the chip reports pending events (bounded by
 * max_interrupt_work): acknowledge everything first, defer Rx work to
 * NAPI (masking Rx interrupt sources while it runs), scavenge completed
 * Tx descriptors, and route abnormal events to rhine_error().
 */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %08x\n",
				   intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
			/* Mask Rx sources (keep Tx/error ones) while NAPI
			   polls; the poll handler does the Rx work. */
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			napi_schedule(&rp->napi);
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					netdev_warn(dev,
						    "%s: Tx engine still on\n",
						    __func__);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
				    intr_status);
			break;
		}
	}

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
			   ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1637
1638/* This routine is logically part of the interrupt handler, but isolated
1639 for clarity. */
1640static void rhine_tx(struct net_device *dev)
1641{
1642 struct rhine_private *rp = netdev_priv(dev);
1643 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1644
1645 spin_lock(&rp->lock);
1646
1647 /* find and cleanup dirty tx descriptors */
1648 while (rp->dirty_tx != rp->cur_tx) {
1649 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1650 if (debug > 6)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001651 netdev_dbg(dev, "Tx scavenge %d status %08x\n",
1652 entry, txstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 if (txstatus & DescOwn)
1654 break;
1655 if (txstatus & 0x8000) {
1656 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001657 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1658 txstatus);
Eric Dumazet553e2332009-05-27 10:34:50 +00001659 dev->stats.tx_errors++;
1660 if (txstatus & 0x0400)
1661 dev->stats.tx_carrier_errors++;
1662 if (txstatus & 0x0200)
1663 dev->stats.tx_window_errors++;
1664 if (txstatus & 0x0100)
1665 dev->stats.tx_aborted_errors++;
1666 if (txstatus & 0x0080)
1667 dev->stats.tx_heartbeat_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1669 (txstatus & 0x0800) || (txstatus & 0x1000)) {
Eric Dumazet553e2332009-05-27 10:34:50 +00001670 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1672 break; /* Keep the skb - we try again */
1673 }
1674 /* Transmitter restarted in 'abnormal' handler. */
1675 } else {
1676 if (rp->quirks & rqRhineI)
Eric Dumazet553e2332009-05-27 10:34:50 +00001677 dev->stats.collisions += (txstatus >> 3) & 0x0F;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 else
Eric Dumazet553e2332009-05-27 10:34:50 +00001679 dev->stats.collisions += txstatus & 0x0F;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 if (debug > 6)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001681 netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
1682 (txstatus >> 3) & 0xF,
1683 txstatus & 0xF);
Eric Dumazet553e2332009-05-27 10:34:50 +00001684 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1685 dev->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 }
1687 /* Free the original skb. */
1688 if (rp->tx_skbuff_dma[entry]) {
1689 pci_unmap_single(rp->pdev,
1690 rp->tx_skbuff_dma[entry],
1691 rp->tx_skbuff[entry]->len,
1692 PCI_DMA_TODEVICE);
1693 }
1694 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1695 rp->tx_skbuff[entry] = NULL;
1696 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1697 }
1698 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1699 netif_wake_queue(dev);
1700
1701 spin_unlock(&rp->lock);
1702}
1703
Roger Luethi38f49e82010-12-06 00:59:40 +00001704/**
1705 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1706 * @skb: pointer to sk_buff
1707 * @data_size: used data area of the buffer including CRC
1708 *
1709 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1710 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1711 * aligned following the CRC.
1712 */
1713static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1714{
1715 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
Harvey Harrison4562b2f2011-03-28 17:08:59 +00001716 return be16_to_cpup((__be16 *)trailer);
Roger Luethi38f49e82010-12-06 00:59:40 +00001717}
1718
/*
 * Process up to limit frames from receive ring.  Returns the number of
 * descriptors processed.  Uses netif_receive_skb(), so this runs in
 * softirq context — NOTE(review): the caller is not visible in this
 * chunk; presumably the NAPI poll routine.
 */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		netdev_dbg(dev, "%s(), entry %d status %08x\n",
			   __func__, entry,
			   le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;	/* length incl. CRC */

		/* Chip still owns this descriptor: nothing more to do. */
		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			netdev_dbg(dev, "%s() status is %08x\n",
				   __func__, desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* Frame spans several buffers; only
				   whole-in-one-buffer frames are supported. */
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				if (debug > 2)
					netdev_dbg(dev, "%s() Rx error was %08x\n",
						   __func__, desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				/* Copybreak: copy the small frame into a fresh
				   skb and leave the ring buffer in place. */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring buffer itself up the stack;
				   the refill loop below replaces it. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}

			/* The hardware VLAN tag sits relative to skb->data
			 * (see rhine_get_vlan_tci), so extract it before
			 * eth_type_trans() advances the data pointer. */
			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, vlan_tci);
			netif_receive_skb(skb);
			dev->stats.rx_bytes += pkt_len;
			dev->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		/* Give the (possibly reused) descriptor back to the chip. */
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
1851
/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);	/* write-to-clear variant */
	ioread16(ioaddr + RxCRCErrs);		/* read-to-clear variants */
	ioread16(ioaddr + RxMissed);
}
1864
/*
 * Restart the Tx engine after an error, resuming at the first
 * unreclaimed descriptor (dirty_tx).  Called from rhine_error() with
 * rp->lock held.  If another Tx error summary is already pending we do
 * nothing here; the ISR will re-enter the error path shortly.
 */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		/* NOTE(review): desc_length bit 17 presumably marks a frame
		 * queued for the 6105M per-queue Tx path — confirm against
		 * the descriptor setup in the xmit routine. */
		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			netdev_warn(dev, "%s() Another error occurred %08x\n",
				    __func__, intr_status);
	}

}
1902
/*
 * Handle the "abnormal event" interrupt causes: link changes, tally
 * counter overflow, Tx aborts/underruns/descriptor races and any
 * unexpected bits.  Called from the interrupt handler; takes rp->lock
 * for the duration.
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Hardware tally counters near overflow: fold them into
		   dev->stats and reset them. */
		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			netdev_info(dev, "Abort %08x, frame dropped\n",
				    intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Bump the Tx FIFO threshold to make underruns less likely. */
		rhine_kick_tx_threshold(rp);
		if (debug > 1)
			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			netdev_info(dev, "Tx descriptor write-back race\n");
	}
	/* Tx error with no more specific cause: also bump the threshold. */
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		if (debug > 1)
			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	/* Any Tx fault requires restarting the Tx engine. */
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Complain about any status bits this handler does not know. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			netdev_err(dev, "Something Wicked happened! %08x\n",
				   intr_status);
	}

	spin_unlock(&rp->lock);
}
1954
1955static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1956{
1957 struct rhine_private *rp = netdev_priv(dev);
1958 void __iomem *ioaddr = rp->base;
1959 unsigned long flags;
1960
1961 spin_lock_irqsave(&rp->lock, flags);
Eric Dumazet553e2332009-05-27 10:34:50 +00001962 dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1963 dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 clear_tally_counters(ioaddr);
1965 spin_unlock_irqrestore(&rp->lock, flags);
1966
Eric Dumazet553e2332009-05-27 10:34:50 +00001967 return &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968}
1969
/*
 * Program the receive filter from dev->flags and the multicast list:
 * promiscuous, all-multi / hash-overflow, per-address CAM (VT6105M and
 * newer), or the classic 64-bit multicast hash filter.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->pdev->revision >= VT6105M) {
		/* Newer chips: program each address into a multicast CAM. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Older chips: 64-bit CRC-based multicast hash filter. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->pdev->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2017
2018static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2019{
2020 struct rhine_private *rp = netdev_priv(dev);
2021
Rick Jones23020ab2011-11-09 09:58:07 +00002022 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2023 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2024 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025}
2026
2027static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2028{
2029 struct rhine_private *rp = netdev_priv(dev);
2030 int rc;
2031
2032 spin_lock_irq(&rp->lock);
2033 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2034 spin_unlock_irq(&rp->lock);
2035
2036 return rc;
2037}
2038
2039static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2040{
2041 struct rhine_private *rp = netdev_priv(dev);
2042 int rc;
2043
2044 spin_lock_irq(&rp->lock);
2045 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2046 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002047 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
2049 return rc;
2050}
2051
2052static int netdev_nway_reset(struct net_device *dev)
2053{
2054 struct rhine_private *rp = netdev_priv(dev);
2055
2056 return mii_nway_restart(&rp->mii_if);
2057}
2058
2059static u32 netdev_get_link(struct net_device *dev)
2060{
2061 struct rhine_private *rp = netdev_priv(dev);
2062
2063 return mii_link_ok(&rp->mii_if);
2064}
2065
/* ethtool: expose the module-wide 'debug' level as the message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
2070
/* ethtool: set the module-wide 'debug' level (shared by all devices). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
2075
2076static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2077{
2078 struct rhine_private *rp = netdev_priv(dev);
2079
2080 if (!(rp->quirks & rqWOL))
2081 return;
2082
2083 spin_lock_irq(&rp->lock);
2084 wol->supported = WAKE_PHY | WAKE_MAGIC |
2085 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2086 wol->wolopts = rp->wolopts;
2087 spin_unlock_irq(&rp->lock);
2088}
2089
2090static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2091{
2092 struct rhine_private *rp = netdev_priv(dev);
2093 u32 support = WAKE_PHY | WAKE_MAGIC |
2094 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2095
2096 if (!(rp->quirks & rqWOL))
2097 return -EINVAL;
2098
2099 if (wol->wolopts & ~support)
2100 return -EINVAL;
2101
2102 spin_lock_irq(&rp->lock);
2103 rp->wolopts = wol->wolopts;
2104 spin_unlock_irq(&rp->lock);
2105
2106 return 0;
2107}
2108
/* ethtool entry points; the WOL handlers reject chips without rqWOL. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_wol = rhine_get_wol,
	.set_wol = rhine_set_wol,
};
2120
2121static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2122{
2123 struct rhine_private *rp = netdev_priv(dev);
2124 int rc;
2125
2126 if (!netif_running(dev))
2127 return -EINVAL;
2128
2129 spin_lock_irq(&rp->lock);
2130 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2131 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002132 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
2134 return rc;
2135}
2136
/*
 * ndo_stop: quiesce NAPI and the reset worker, silence and stop the
 * chip under the lock, then release the IRQ and all ring/buffer memory.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	napi_disable(&rp->napi);
	cancel_work_sync(&rp->reset_task);
	netif_stop_queue(dev);

	spin_lock_irq(&rp->lock);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
			   ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	/* Safe to free now: interrupts are masked and engines stopped. */
	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2170
2171
/*
 * PCI remove: unregister the netdev first so no new I/O can start, then
 * release the MMIO mapping, PCI regions, the netdev and the PCI device.
 */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2186
/*
 * Arm Wake-on-LAN per rp->wolopts and (unless avoid_D3 is set) drop the
 * chip into power state D3.  Used as the PCI shutdown hook and called
 * from rhine_suspend().
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}
2233
2234#ifdef CONFIG_PM
/*
 * PM suspend: stop NAPI, detach the interface, save PCI state, and arm
 * WOL / power down via rhine_shutdown().  The IRQ is released here and
 * re-requested in rhine_resume().
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	napi_disable(&rp->napi);

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
2256
/*
 * PM resume: reacquire the IRQ, return the device to D0, restore PCI
 * config, rebuild the Tx/Rx rings from scratch and reprogram the chip
 * before re-attaching the interface.
 */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Request failure is logged but not fatal here. */
	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		netdev_err(dev, "request_irq failed\n");

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		netdev_info(dev, "Entering power state D0 %s (%d)\n",
			    ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Discard any pre-suspend buffers and start with clean rings. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
2293#endif /* CONFIG_PM */
2294
/* PCI driver glue; suspend/resume hooks exist only with CONFIG_PM. */
static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};
2306
/*
 * Boards whose BIOS cannot PXE-boot the chip out of power state D3;
 * matched in rhine_init() to force the avoid_D3 workaround.
 */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
2325static int __init rhine_init(void)
2326{
2327/* when a module, this is printed whether or not devices are found in probe */
2328#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002329 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330#endif
Roger Luethie84df482007-03-06 19:57:37 +01002331 if (dmi_check_system(rhine_dmi_table)) {
2332 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002333 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002334 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002335 }
2336 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002337 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002338
Jeff Garzik29917622006-08-19 17:48:59 -04002339 return pci_register_driver(&rhine_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340}
2341
2342
/* Module unload: unregister the PCI driver, detaching any bound NICs. */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2347
2348
/* Module entry and exit points. */
module_init(rhine_init);
module_exit(rhine_cleanup);