blob: 5c4983b2870a9c7a5dd7adefba08a8c054671a9b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi38f49e82010-12-06 00:59:40 +000035#define DRV_VERSION "1.5.0"
36#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
42
Joe Perchesdf4511f2011-04-16 14:15:25 +000043#define DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -070044static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
45static int max_interrupt_work = 20;
46
47/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
48 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000049#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
50 defined(CONFIG_SPARC) || defined(__ia64__) || \
51 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070052static int rx_copybreak = 1518;
53#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070054static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070055#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Roger Luethib933b4d2006-08-14 23:00:21 -070057/* Work-around for broken BIOSes: they are unable to get the chip back out of
58 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000059static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/*
62 * In case you are looking for 'options[]' or 'full_duplex[]', they
63 * are gone. Use ethtool(8) instead.
64 */
65
66/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
67 The Rhine has a 64 element 8390-like hash table. */
68static const int multicast_filter_limit = 32;
69
70
71/* Operational parameters that are set at compile time. */
72
73/* Keep the ring sizes a power of two for compile efficiency.
74 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
75 Making the Tx ring too large decreases the effectiveness of channel
76 bonding and packet priority.
77 There are no ill effects from too-large receive rings. */
78#define TX_RING_SIZE 16
79#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070080#define RX_RING_SIZE 64
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
82/* Operational parameters that usually are not changed. */
83
84/* Time in jiffies before concluding the transmitter is hung. */
85#define TX_TIMEOUT (2*HZ)
86
87#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
88
89#include <linux/module.h>
90#include <linux/moduleparam.h>
91#include <linux/kernel.h>
92#include <linux/string.h>
93#include <linux/timer.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <linux/interrupt.h>
97#include <linux/pci.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -040098#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070099#include <linux/netdevice.h>
100#include <linux/etherdevice.h>
101#include <linux/skbuff.h>
102#include <linux/init.h>
103#include <linux/delay.h>
104#include <linux/mii.h>
105#include <linux/ethtool.h>
106#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000107#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800109#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110#include <asm/processor.h> /* Processor type for cache alignment. */
111#include <asm/io.h>
112#include <asm/irq.h>
113#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100114#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
116/* These identify the driver base version and may not be removed. */
Stephen Hemmingerc8de1fc2009-02-26 10:19:31 +0000117static const char version[] __devinitconst =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000118 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
120/* This driver was written to use PCI memory space. Some early versions
121 of the Rhine may only work correctly with I/O space accesses. */
122#ifdef CONFIG_VIA_RHINE_MMIO
123#define USE_MMIO
124#else
125#endif
126
127MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129MODULE_LICENSE("GPL");
130
131module_param(max_interrupt_work, int, 0);
132module_param(debug, int, 0);
133module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700134module_param(avoid_D3, bool, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700138MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
Roger Luethi38f49e82010-12-06 00:59:40 +0000140#define MCAM_SIZE 32
141#define VCAM_SIZE 32
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143/*
144 Theory of Operation
145
146I. Board Compatibility
147
148This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
149controller.
150
151II. Board-specific settings
152
153Boards with this chip are functional only in a bus-master PCI slot.
154
155Many operational settings are loaded from the EEPROM to the Config word at
156offset 0x78. For most of these settings, this driver assumes that they are
157correct.
158If this driver is compiled to use PCI memory space operations the EEPROM
159must be configured to enable memory ops.
160
161III. Driver operation
162
163IIIa. Ring buffers
164
165This driver uses two statically allocated fixed-size descriptor lists
166formed into rings by a branch from the final descriptor to the beginning of
167the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
168
169IIIb/c. Transmit/Receive Structure
170
171This driver attempts to use a zero-copy receive and transmit scheme.
172
173Alas, all data buffers are required to start on a 32 bit boundary, so
174the driver must often copy transmit packets into bounce buffers.
175
176The driver allocates full frame size skbuffs for the Rx ring buffers at
177open() time and passes the skb->data field to the chip as receive data
178buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
179a fresh skbuff is allocated and the frame is copied to the new skbuff.
180When the incoming frame is larger, the skbuff is passed directly up the
181protocol stack. Buffers consumed this way are replaced by newly allocated
182skbuffs in the last phase of rhine_rx().
183
184The RX_COPYBREAK value is chosen to trade-off the memory wasted by
185using a full-sized skbuff for small frames vs. the copying costs of larger
186frames. New boards are typically used in generously configured machines
187and the underfilled buffers have negligible impact compared to the benefit of
188a single allocation size, so the default value of zero results in never
189copying packets. When copying is done, the cost is usually mitigated by using
190a combined copy/checksum routine. Copying also preloads the cache, which is
191most useful with small frames.
192
193Since the VIA chips are only able to transfer data to buffers on 32 bit
194boundaries, the IP header at offset 14 in an ethernet frame isn't
195longword aligned for further processing. Copying these unaligned buffers
196has the beneficial effect of 16-byte aligning the IP header.
197
198IIId. Synchronization
199
200The driver runs as two independent, single-threaded flows of control. One
201is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800202netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
203which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
205The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800206netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
207the ring is not available it stops the transmit queue by
208calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210The interrupt handler has exclusive control over the Rx ring and records stats
211from the Tx ring. After reaping the stats, it marks the Tx queue entry as
212empty by incrementing the dirty_tx mark. If at least half of the entries in
213the Rx ring are available the transmit queue is woken up if it was stopped.
214
215IV. Notes
216
217IVb. References
218
219Preliminary VT86C100A manual from http://www.via.com.tw/
220http://www.scyld.com/expert/100mbps.html
221http://www.scyld.com/expert/NWay.html
222ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
223ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
224
225
226IVc. Errata
227
228The VT86C100A manual is not reliable information.
229The 3043 chip does not handle unaligned transmit or receive buffers, resulting
230in significant performance degradation for bounce buffer copies on transmit
231and unaligned IP headers on receive.
232The chip does not pad to minimum transmit length.
233
234*/
235
236
237/* This table drives the PCI probe routines. It's mostly boilerplate in all
238 of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
241*/
242
243enum rhine_revs {
244 VT86C100A = 0x00,
245 VTunknown0 = 0x20,
246 VT6102 = 0x40,
247 VT8231 = 0x50, /* Integrated MAC */
248 VT8233 = 0x60, /* Integrated MAC */
249 VT8235 = 0x74, /* Integrated MAC */
250 VT8237 = 0x78, /* Integrated MAC */
251 VTunknown1 = 0x7C,
252 VT6105 = 0x80,
253 VT6105_B0 = 0x83,
254 VT6105L = 0x8A,
255 VT6107 = 0x8C,
256 VTunknown2 = 0x8E,
257 VT6105M = 0x90, /* Management adapter */
258};
259
260enum rhine_quirks {
261 rqWOL = 0x0001, /* Wake-On-LAN support */
262 rqForceReset = 0x0002,
263 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
264 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
265 rqRhineI = 0x0100, /* See comment below */
266};
267/*
268 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
269 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
271 */
272
273/* Beware of PCI posted writes */
274#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
275
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000276static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400277 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
278 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
279 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
280 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281 { } /* terminate list */
282};
283MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284
285
286/* Offsets to the device registers. */
287enum register_offsets {
288 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
Roger Luethi38f49e82010-12-06 00:59:40 +0000289 ChipCmd1=0x09, TQWake=0x0A,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 IntrStatus=0x0C, IntrEnable=0x0E,
291 MulticastFilter0=0x10, MulticastFilter1=0x14,
292 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
Roger Luethi38f49e82010-12-06 00:59:40 +0000293 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
295 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
296 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
297 StickyHW=0x83, IntrStatus2=0x84,
Roger Luethi38f49e82010-12-06 00:59:40 +0000298 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
300 WOLcrClr1=0xA6, WOLcgClr=0xA7,
301 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
302};
303
304/* Bits in ConfigD */
305enum backoff_bits {
306 BackOptional=0x01, BackModify=0x02,
307 BackCaptureEffect=0x04, BackRandom=0x08
308};
309
Roger Luethi38f49e82010-12-06 00:59:40 +0000310/* Bits in the TxConfig (TCR) register */
311enum tcr_bits {
312 TCR_PQEN=0x01,
313 TCR_LB0=0x02, /* loopback[0] */
314 TCR_LB1=0x04, /* loopback[1] */
315 TCR_OFSET=0x08,
316 TCR_RTGOPT=0x10,
317 TCR_RTFT0=0x20,
318 TCR_RTFT1=0x40,
319 TCR_RTSF=0x80,
320};
321
322/* Bits in the CamCon (CAMC) register */
323enum camcon_bits {
324 CAMC_CAMEN=0x01,
325 CAMC_VCAMSL=0x02,
326 CAMC_CAMWR=0x04,
327 CAMC_CAMRD=0x08,
328};
329
330/* Bits in the PCIBusConfig1 (BCR1) register */
331enum bcr1_bits {
332 BCR1_POT0=0x01,
333 BCR1_POT1=0x02,
334 BCR1_POT2=0x04,
335 BCR1_CTFT0=0x08,
336 BCR1_CTFT1=0x10,
337 BCR1_CTSF=0x20,
338 BCR1_TXQNOBK=0x40, /* for VT6105 */
339 BCR1_VIDFR=0x80, /* for VT6105 */
340 BCR1_MED0=0x40, /* for VT6102 */
341 BCR1_MED1=0x80, /* for VT6102 */
342};
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344#ifdef USE_MMIO
345/* Registers we check that mmio and reg are the same. */
346static const int mmio_verify_registers[] = {
347 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
348 0
349};
350#endif
351
352/* Bits in the interrupt status/mask registers. */
353enum intr_status_bits {
354 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
355 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
356 IntrPCIErr=0x0040,
357 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
358 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
359 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
360 IntrRxWakeUp=0x8000,
361 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
362 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
363 IntrTxErrSummary=0x082218,
364};
365
366/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
367enum wol_bits {
368 WOLucast = 0x10,
369 WOLmagic = 0x20,
370 WOLbmcast = 0x30,
371 WOLlnkon = 0x40,
372 WOLlnkoff = 0x80,
373};
374
375/* The Rx and Tx buffer descriptors. */
376struct rx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400377 __le32 rx_status;
378 __le32 desc_length; /* Chain flag, Buffer/frame length */
379 __le32 addr;
380 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381};
382struct tx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400383 __le32 tx_status;
384 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
385 __le32 addr;
386 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387};
388
389/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
390#define TXDESC 0x00e08000
391
392enum rx_status_bits {
393 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
394};
395
396/* Bits in *_desc.*_status */
397enum desc_status_bits {
398 DescOwn=0x80000000
399};
400
Roger Luethi38f49e82010-12-06 00:59:40 +0000401/* Bits in *_desc.*_length */
402enum desc_length_bits {
403 DescTag=0x00010000
404};
405
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406/* Bits in ChipCmd. */
407enum chip_cmd_bits {
408 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
409 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
410 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
411 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
412};
413
/*
 * Per-device private state, reached from the net_device via netdev_priv().
 * Holds the DMA descriptor rings, their shadow skbuff arrays, and the
 * frequently-accessed operational values (kept adjacent for cache effect).
 */
struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;		/* underlying PCI device */
	long pioaddr;			/* port-I/O base (PCI BAR 0) */
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;		/* Tx-side lock; see "IIId. Synchronization" above */
	struct work_struct reset_task;	/* deferred chip reset (rhine_reset_task) */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;			/* rq* flags selected from pdev->revision */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;			/* active Wake-on-LAN options */

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;	/* generic MII state (mdio_read/mdio_write) */
	void __iomem *base;		/* mapped register window (PIO or MMIO) */
};
457
/*
 * Read-modify-write helpers for 8/16/32-bit chip registers at iomem
 * address p:
 *   *_BITS_ON(x, p)     set the bits in x
 *   *_BITS_IS_ON(x, p)  test whether any bit in x is set
 *   *_BITS_OFF(x, p)    clear the bits in x
 *   *_BITS_SET(x, m, p) replace the field selected by mask m with x
 * NOTE: each macro evaluates p more than once; pass side-effect-free
 * expressions only.
 */
#define BYTE_REG_BITS_ON(x, p)	do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)	do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)	do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)	(ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)	(ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)	(ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)	do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)	do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)	do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)	do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)	do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)	do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
473
474
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475static int mdio_read(struct net_device *dev, int phy_id, int location);
476static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
477static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800478static void rhine_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000480static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
481 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100482static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700484static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485static void rhine_error(struct net_device *dev, int intr_status);
486static void rhine_set_rx_mode(struct net_device *dev);
487static struct net_device_stats *rhine_get_stats(struct net_device *dev);
488static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400489static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490static int rhine_close(struct net_device *dev);
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -0700491static void rhine_shutdown (struct pci_dev *pdev);
Jiri Pirko8e586132011-12-08 19:52:37 -0500492static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
493static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
Roger Luethi38f49e82010-12-06 00:59:40 +0000494static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
495static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
496static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
497static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
498static void rhine_init_cam_filter(struct net_device *dev);
499static void rhine_update_vcam(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500
/*
 * Busy-poll (no sleeping; usable in atomic context) for up to 1024
 * iterations until 'condition' becomes true.  When debugging is on and
 * more than half the budget was consumed, report how long it took.
 * NOTE: 'condition' is re-evaluated every iteration.
 */
#define RHINE_WAIT_FOR(condition)				\
do {								\
	int i = 1024;						\
	while (!(condition) && --i)				\
		;						\
	if (debug > 1 && i < 512)				\
		pr_info("%4d cycles used @ %s:%d\n",		\
			1024 - i, __func__, __LINE__);		\
} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510
511static inline u32 get_intr_status(struct net_device *dev)
512{
513 struct rhine_private *rp = netdev_priv(dev);
514 void __iomem *ioaddr = rp->base;
515 u32 intr_status;
516
517 intr_status = ioread16(ioaddr + IntrStatus);
518 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
519 if (rp->quirks & rqStatusWBRace)
520 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
521 return intr_status;
522}
523
524/*
525 * Get power related registers into sane state.
526 * Notify user about past WOL event.
527 */
528static void rhine_power_init(struct net_device *dev)
529{
530 struct rhine_private *rp = netdev_priv(dev);
531 void __iomem *ioaddr = rp->base;
532 u16 wolstat;
533
534 if (rp->quirks & rqWOL) {
535 /* Make sure chip is in power state D0 */
536 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
537
538 /* Disable "force PME-enable" */
539 iowrite8(0x80, ioaddr + WOLcgClr);
540
541 /* Clear power-event config bits (WOL) */
542 iowrite8(0xFF, ioaddr + WOLcrClr);
543 /* More recent cards can manage two additional patterns */
544 if (rp->quirks & rq6patterns)
545 iowrite8(0x03, ioaddr + WOLcrClr1);
546
547 /* Save power-event status bits */
548 wolstat = ioread8(ioaddr + PwrcsrSet);
549 if (rp->quirks & rq6patterns)
550 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
551
552 /* Clear power-event status bits */
553 iowrite8(0xFF, ioaddr + PwrcsrClr);
554 if (rp->quirks & rq6patterns)
555 iowrite8(0x03, ioaddr + PwrcsrClr1);
556
557 if (wolstat) {
558 char *reason;
559 switch (wolstat) {
560 case WOLmagic:
561 reason = "Magic packet";
562 break;
563 case WOLlnkon:
564 reason = "Link went up";
565 break;
566 case WOLlnkoff:
567 reason = "Link went down";
568 break;
569 case WOLucast:
570 reason = "Unicast packet";
571 break;
572 case WOLbmcast:
573 reason = "Multicast/broadcast packet";
574 break;
575 default:
576 reason = "Unknown";
577 }
Joe Perchesdf4511f2011-04-16 14:15:25 +0000578 netdev_info(dev, "Woke system up. Reason: %s\n",
579 reason);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 }
581 }
582}
583
/*
 * Soft-reset the chip by writing Cmd1Reset to ChipCmd1.  The reset bit
 * self-clears on completion; if it is still set after the flushing
 * read-back, optionally force the reset (rqForceReset quirk) and
 * busy-wait for the bit to clear.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;		/* flush the posted write before reading status back */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		netdev_info(dev, "Reset %s\n",
			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			    "failed" : "succeeded");
}
608
609#ifdef USE_MMIO
610static void enable_mmio(long pioaddr, u32 quirks)
611{
612 int n;
613 if (quirks & rqRhineI) {
614 /* More recent docs say that this bit is reserved ... */
615 n = inb(pioaddr + ConfigA) | 0x20;
616 outb(n, pioaddr + ConfigA);
617 } else {
618 n = inb(pioaddr + ConfigD) | 0x80;
619 outb(n, pioaddr + ConfigD);
620 }
621}
622#endif
623
624/*
625 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
626 * (plus 0x6C for Rhine-I/II)
627 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/*
	 * Request an EEPROM auto-reload: bit 0x20 of MACRegEEcsr starts
	 * the reload and self-clears when the chip is done (polled below).
	 * Port I/O must be used here because the reload overwrites the
	 * configuration registers that enable MMIO.
	 */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
650
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netpoll (e.g. netconsole): invoke the regular
 * interrupt handler directly, with the device IRQ disabled so the real
 * handler cannot run concurrently.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif
659
/*
 * NAPI poll callback: process up to @budget received frames via
 * rhine_rx().  If the Rx work is exhausted before the budget is used
 * up, leave polling mode (napi_complete) and re-enable the chip's
 * interrupt sources.  Returns the number of frames processed.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		/*
		 * Re-enable the interrupt mask; presumably masked by the
		 * interrupt handler before NAPI was scheduled (handler is
		 * outside this view) -- standard NAPI flow.
		 */
		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
Roger Luethi633949a2006-08-14 23:00:17 -0700680
/*
 * Probe-time hardware init: soft-reset the chip, then reload the
 * EEPROM-controlled configuration bytes that the reset wiped out.
 */
static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
695
/*
 * net_device callback table shared by all Rhine variants.  MTU change,
 * address validation and MAC-address setting use the generic ethernet
 * helpers; everything else is implemented in this driver.
 */
static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats		 = rhine_get_stats,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
713
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714static int __devinit rhine_init_one(struct pci_dev *pdev,
715 const struct pci_device_id *ent)
716{
717 struct net_device *dev;
718 struct rhine_private *rp;
719 int i, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 u32 quirks;
721 long pioaddr;
722 long memaddr;
723 void __iomem *ioaddr;
724 int io_size, phy_id;
725 const char *name;
726#ifdef USE_MMIO
727 int bar = 1;
728#else
729 int bar = 0;
730#endif
731
732/* when built into the kernel, we only print version if device is found */
733#ifndef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +0000734 pr_info_once("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735#endif
736
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 io_size = 256;
738 phy_id = 0;
739 quirks = 0;
740 name = "Rhine";
Auke Kok44c10132007-06-08 15:46:36 -0700741 if (pdev->revision < VTunknown0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 quirks = rqRhineI;
743 io_size = 128;
744 }
Auke Kok44c10132007-06-08 15:46:36 -0700745 else if (pdev->revision >= VT6102) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 quirks = rqWOL | rqForceReset;
Auke Kok44c10132007-06-08 15:46:36 -0700747 if (pdev->revision < VT6105) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748 name = "Rhine II";
749 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
750 }
751 else {
752 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
Auke Kok44c10132007-06-08 15:46:36 -0700753 if (pdev->revision >= VT6105_B0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 quirks |= rq6patterns;
Auke Kok44c10132007-06-08 15:46:36 -0700755 if (pdev->revision < VT6105M)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 name = "Rhine III";
757 else
758 name = "Rhine III (Management Adapter)";
759 }
760 }
761
762 rc = pci_enable_device(pdev);
763 if (rc)
764 goto err_out;
765
766 /* this should always be supported */
Yang Hongyang284901a2009-04-06 19:01:15 -0700767 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 if (rc) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000769 dev_err(&pdev->dev,
770 "32-bit PCI DMA addresses not supported by the card!?\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 goto err_out;
772 }
773
774 /* sanity check */
775 if ((pci_resource_len(pdev, 0) < io_size) ||
776 (pci_resource_len(pdev, 1) < io_size)) {
777 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000778 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779 goto err_out;
780 }
781
782 pioaddr = pci_resource_start(pdev, 0);
783 memaddr = pci_resource_start(pdev, 1);
784
785 pci_set_master(pdev);
786
787 dev = alloc_etherdev(sizeof(struct rhine_private));
788 if (!dev) {
789 rc = -ENOMEM;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000790 dev_err(&pdev->dev, "alloc_etherdev failed\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 goto err_out;
792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 SET_NETDEV_DEV(dev, &pdev->dev);
794
795 rp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700796 rp->dev = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 rp->quirks = quirks;
798 rp->pioaddr = pioaddr;
799 rp->pdev = pdev;
800
801 rc = pci_request_regions(pdev, DRV_NAME);
802 if (rc)
803 goto err_out_free_netdev;
804
805 ioaddr = pci_iomap(pdev, bar, io_size);
806 if (!ioaddr) {
807 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000808 dev_err(&pdev->dev,
809 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
810 pci_name(pdev), io_size, memaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 goto err_out_free_res;
812 }
813
814#ifdef USE_MMIO
815 enable_mmio(pioaddr, quirks);
816
817 /* Check that selected MMIO registers match the PIO ones */
818 i = 0;
819 while (mmio_verify_registers[i]) {
820 int reg = mmio_verify_registers[i++];
821 unsigned char a = inb(pioaddr+reg);
822 unsigned char b = readb(ioaddr+reg);
823 if (a != b) {
824 rc = -EIO;
Joe Perchesdf4511f2011-04-16 14:15:25 +0000825 dev_err(&pdev->dev,
826 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
827 reg, a, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 goto err_out_unmap;
829 }
830 }
831#endif /* USE_MMIO */
832
833 dev->base_addr = (unsigned long)ioaddr;
834 rp->base = ioaddr;
835
836 /* Get chip registers into a sane state */
837 rhine_power_init(dev);
838 rhine_hw_init(dev, pioaddr);
839
840 for (i = 0; i < 6; i++)
841 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
842
Joe Perches482e3fe2011-04-16 14:15:26 +0000843 if (!is_valid_ether_addr(dev->dev_addr)) {
844 /* Report it and use a random ethernet address instead */
845 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
846 random_ether_addr(dev->dev_addr);
847 netdev_info(dev, "Using random MAC address: %pM\n",
848 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 }
Joe Perches482e3fe2011-04-16 14:15:26 +0000850 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
852 /* For Rhine-I/II, phy_id is loaded from EEPROM */
853 if (!phy_id)
854 phy_id = ioread8(ioaddr + 0x6C);
855
856 dev->irq = pdev->irq;
857
858 spin_lock_init(&rp->lock);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800859 INIT_WORK(&rp->reset_task, rhine_reset_task);
860
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861 rp->mii_if.dev = dev;
862 rp->mii_if.mdio_read = mdio_read;
863 rp->mii_if.mdio_write = mdio_write;
864 rp->mii_if.phy_id_mask = 0x1f;
865 rp->mii_if.reg_num_mask = 0x1f;
866
867 /* The chip-specific entries in the device structure. */
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800868 dev->netdev_ops = &rhine_netdev_ops;
869 dev->ethtool_ops = &netdev_ethtool_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870 dev->watchdog_timeo = TX_TIMEOUT;
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800871
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700872 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
Francois Romieu32b0f532008-07-11 00:30:14 +0200873
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 if (rp->quirks & rqRhineI)
875 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
876
Roger Luethi38f49e82010-12-06 00:59:40 +0000877 if (pdev->revision >= VT6105M)
878 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
879 NETIF_F_HW_VLAN_FILTER;
880
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881 /* dev->name not defined before register_netdev()! */
882 rc = register_netdev(dev);
883 if (rc)
884 goto err_out_unmap;
885
Joe Perchesdf4511f2011-04-16 14:15:25 +0000886 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
887 name,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888#ifdef USE_MMIO
Joe Perchesdf4511f2011-04-16 14:15:25 +0000889 memaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890#else
Joe Perchesdf4511f2011-04-16 14:15:25 +0000891 (long)ioaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892#endif
Joe Perchesdf4511f2011-04-16 14:15:25 +0000893 dev->dev_addr, pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894
895 pci_set_drvdata(pdev, dev);
896
897 {
898 u16 mii_cmd;
899 int mii_status = mdio_read(dev, phy_id, 1);
900 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
901 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
902 if (mii_status != 0xffff && mii_status != 0x0000) {
903 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
Joe Perchesdf4511f2011-04-16 14:15:25 +0000904 netdev_info(dev,
905 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
906 phy_id,
907 mii_status, rp->mii_if.advertising,
908 mdio_read(dev, phy_id, 5));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909
910 /* set IFF_RUNNING */
911 if (mii_status & BMSR_LSTATUS)
912 netif_carrier_on(dev);
913 else
914 netif_carrier_off(dev);
915
916 }
917 }
918 rp->mii_if.phy_id = phy_id;
Roger Luethib933b4d2006-08-14 23:00:21 -0700919 if (debug > 1 && avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +0000920 netdev_info(dev, "No D3 power state at shutdown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921
922 return 0;
923
924err_out_unmap:
925 pci_iounmap(pdev, ioaddr);
926err_out_free_res:
927 pci_release_regions(pdev);
928err_out_free_netdev:
929 free_netdev(dev);
930err_out:
931 return rc;
932}
933
934static int alloc_ring(struct net_device* dev)
935{
936 struct rhine_private *rp = netdev_priv(dev);
937 void *ring;
938 dma_addr_t ring_dma;
939
940 ring = pci_alloc_consistent(rp->pdev,
941 RX_RING_SIZE * sizeof(struct rx_desc) +
942 TX_RING_SIZE * sizeof(struct tx_desc),
943 &ring_dma);
944 if (!ring) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000945 netdev_err(dev, "Could not allocate DMA memory\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 return -ENOMEM;
947 }
948 if (rp->quirks & rqRhineI) {
949 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
950 PKT_BUF_SZ * TX_RING_SIZE,
951 &rp->tx_bufs_dma);
952 if (rp->tx_bufs == NULL) {
953 pci_free_consistent(rp->pdev,
954 RX_RING_SIZE * sizeof(struct rx_desc) +
955 TX_RING_SIZE * sizeof(struct tx_desc),
956 ring, ring_dma);
957 return -ENOMEM;
958 }
959 }
960
961 rp->rx_ring = ring;
962 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
963 rp->rx_ring_dma = ring_dma;
964 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
965
966 return 0;
967}
968
969static void free_ring(struct net_device* dev)
970{
971 struct rhine_private *rp = netdev_priv(dev);
972
973 pci_free_consistent(rp->pdev,
974 RX_RING_SIZE * sizeof(struct rx_desc) +
975 TX_RING_SIZE * sizeof(struct tx_desc),
976 rp->rx_ring, rp->rx_ring_dma);
977 rp->tx_ring = NULL;
978
979 if (rp->tx_bufs)
980 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
981 rp->tx_bufs, rp->tx_bufs_dma);
982
983 rp->tx_bufs = NULL;
984
985}
986
987static void alloc_rbufs(struct net_device *dev)
988{
989 struct rhine_private *rp = netdev_priv(dev);
990 dma_addr_t next;
991 int i;
992
993 rp->dirty_rx = rp->cur_rx = 0;
994
995 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
996 rp->rx_head_desc = &rp->rx_ring[0];
997 next = rp->rx_ring_dma;
998
999 /* Init the ring entries */
1000 for (i = 0; i < RX_RING_SIZE; i++) {
1001 rp->rx_ring[i].rx_status = 0;
1002 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1003 next += sizeof(struct rx_desc);
1004 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1005 rp->rx_skbuff[i] = NULL;
1006 }
1007 /* Mark the last entry as wrapping the ring. */
1008 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1009
1010 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1011 for (i = 0; i < RX_RING_SIZE; i++) {
Kevin Lob26b5552008-08-27 11:35:09 +08001012 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 rp->rx_skbuff[i] = skb;
1014 if (skb == NULL)
1015 break;
1016 skb->dev = dev; /* Mark as being used by this device. */
1017
1018 rp->rx_skbuff_dma[i] =
David S. Miller689be432005-06-28 15:25:31 -07001019 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 PCI_DMA_FROMDEVICE);
1021
1022 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1023 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1024 }
1025 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1026}
1027
1028static void free_rbufs(struct net_device* dev)
1029{
1030 struct rhine_private *rp = netdev_priv(dev);
1031 int i;
1032
1033 /* Free all the skbuffs in the Rx queue. */
1034 for (i = 0; i < RX_RING_SIZE; i++) {
1035 rp->rx_ring[i].rx_status = 0;
1036 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1037 if (rp->rx_skbuff[i]) {
1038 pci_unmap_single(rp->pdev,
1039 rp->rx_skbuff_dma[i],
1040 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1041 dev_kfree_skb(rp->rx_skbuff[i]);
1042 }
1043 rp->rx_skbuff[i] = NULL;
1044 }
1045}
1046
1047static void alloc_tbufs(struct net_device* dev)
1048{
1049 struct rhine_private *rp = netdev_priv(dev);
1050 dma_addr_t next;
1051 int i;
1052
1053 rp->dirty_tx = rp->cur_tx = 0;
1054 next = rp->tx_ring_dma;
1055 for (i = 0; i < TX_RING_SIZE; i++) {
1056 rp->tx_skbuff[i] = NULL;
1057 rp->tx_ring[i].tx_status = 0;
1058 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1059 next += sizeof(struct tx_desc);
1060 rp->tx_ring[i].next_desc = cpu_to_le32(next);
Roger Luethi4be5de22006-04-04 20:49:16 +02001061 if (rp->quirks & rqRhineI)
1062 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 }
1064 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1065
1066}
1067
1068static void free_tbufs(struct net_device* dev)
1069{
1070 struct rhine_private *rp = netdev_priv(dev);
1071 int i;
1072
1073 for (i = 0; i < TX_RING_SIZE; i++) {
1074 rp->tx_ring[i].tx_status = 0;
1075 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1076 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1077 if (rp->tx_skbuff[i]) {
1078 if (rp->tx_skbuff_dma[i]) {
1079 pci_unmap_single(rp->pdev,
1080 rp->tx_skbuff_dma[i],
1081 rp->tx_skbuff[i]->len,
1082 PCI_DMA_TODEVICE);
1083 }
1084 dev_kfree_skb(rp->tx_skbuff[i]);
1085 }
1086 rp->tx_skbuff[i] = NULL;
1087 rp->tx_buf[i] = NULL;
1088 }
1089}
1090
1091static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1092{
1093 struct rhine_private *rp = netdev_priv(dev);
1094 void __iomem *ioaddr = rp->base;
1095
1096 mii_check_media(&rp->mii_if, debug, init_media);
1097
1098 if (rp->mii_if.full_duplex)
1099 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1100 ioaddr + ChipCmd1);
1101 else
1102 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1103 ioaddr + ChipCmd1);
Roger Luethi00b428c2006-03-28 20:53:56 +02001104 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001105 netdev_info(dev, "force_media %d, carrier %d\n",
1106 rp->mii_if.force_media, netif_carrier_ok(dev));
Roger Luethi00b428c2006-03-28 20:53:56 +02001107}
1108
1109/* Called after status of force_media possibly changed */
Adrian Bunk0761be42006-04-10 23:22:21 -07001110static void rhine_set_carrier(struct mii_if_info *mii)
Roger Luethi00b428c2006-03-28 20:53:56 +02001111{
1112 if (mii->force_media) {
1113 /* autoneg is off: Link is always assumed to be up */
1114 if (!netif_carrier_ok(mii->dev))
1115 netif_carrier_on(mii->dev);
1116 }
1117 else /* Let MMI library update carrier status */
1118 rhine_check_media(mii->dev, 0);
1119 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001120 netdev_info(mii->dev, "force_media %d, carrier %d\n",
1121 mii->force_media, netif_carrier_ok(mii->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122}
1123
Roger Luethi38f49e82010-12-06 00:59:40 +00001124/**
1125 * rhine_set_cam - set CAM multicast filters
1126 * @ioaddr: register block of this Rhine
1127 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1128 * @addr: multicast address (6 bytes)
1129 *
1130 * Load addresses into multicast filters.
1131 */
1132static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1133{
1134 int i;
1135
1136 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1137 wmb();
1138
1139 /* Paranoid -- idx out of range should never happen */
1140 idx &= (MCAM_SIZE - 1);
1141
1142 iowrite8((u8) idx, ioaddr + CamAddr);
1143
1144 for (i = 0; i < 6; i++, addr++)
1145 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1146 udelay(10);
1147 wmb();
1148
1149 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1150 udelay(10);
1151
1152 iowrite8(0, ioaddr + CamCon);
1153}
1154
1155/**
1156 * rhine_set_vlan_cam - set CAM VLAN filters
1157 * @ioaddr: register block of this Rhine
1158 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1159 * @addr: VLAN ID (2 bytes)
1160 *
1161 * Load addresses into VLAN filters.
1162 */
1163static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1164{
1165 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1166 wmb();
1167
1168 /* Paranoid -- idx out of range should never happen */
1169 idx &= (VCAM_SIZE - 1);
1170
1171 iowrite8((u8) idx, ioaddr + CamAddr);
1172
1173 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1174 udelay(10);
1175 wmb();
1176
1177 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1178 udelay(10);
1179
1180 iowrite8(0, ioaddr + CamCon);
1181}
1182
1183/**
1184 * rhine_set_cam_mask - set multicast CAM mask
1185 * @ioaddr: register block of this Rhine
1186 * @mask: multicast CAM mask
1187 *
1188 * Mask sets multicast filters active/inactive.
1189 */
1190static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1191{
1192 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1193 wmb();
1194
1195 /* write mask */
1196 iowrite32(mask, ioaddr + CamMask);
1197
1198 /* disable CAMEN */
1199 iowrite8(0, ioaddr + CamCon);
1200}
1201
1202/**
1203 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1204 * @ioaddr: register block of this Rhine
1205 * @mask: VLAN CAM mask
1206 *
1207 * Mask sets VLAN filters active/inactive.
1208 */
1209static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1210{
1211 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1212 wmb();
1213
1214 /* write mask */
1215 iowrite32(mask, ioaddr + CamMask);
1216
1217 /* disable CAMEN */
1218 iowrite8(0, ioaddr + CamCon);
1219}
1220
1221/**
1222 * rhine_init_cam_filter - initialize CAM filters
1223 * @dev: network device
1224 *
1225 * Initialize (disable) hardware VLAN and multicast support on this
1226 * Rhine.
1227 */
1228static void rhine_init_cam_filter(struct net_device *dev)
1229{
1230 struct rhine_private *rp = netdev_priv(dev);
1231 void __iomem *ioaddr = rp->base;
1232
1233 /* Disable all CAMs */
1234 rhine_set_vlan_cam_mask(ioaddr, 0);
1235 rhine_set_cam_mask(ioaddr, 0);
1236
1237 /* disable hardware VLAN support */
1238 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1239 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1240}
1241
1242/**
1243 * rhine_update_vcam - update VLAN CAM filters
1244 * @rp: rhine_private data of this Rhine
1245 *
1246 * Update VLAN CAM filters to match configuration change.
1247 */
1248static void rhine_update_vcam(struct net_device *dev)
1249{
1250 struct rhine_private *rp = netdev_priv(dev);
1251 void __iomem *ioaddr = rp->base;
1252 u16 vid;
1253 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1254 unsigned int i = 0;
1255
1256 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1257 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1258 vCAMmask |= 1 << i;
1259 if (++i >= VCAM_SIZE)
1260 break;
1261 }
1262 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1263}
1264
Jiri Pirko8e586132011-12-08 19:52:37 -05001265static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001266{
1267 struct rhine_private *rp = netdev_priv(dev);
1268
1269 spin_lock_irq(&rp->lock);
1270 set_bit(vid, rp->active_vlans);
1271 rhine_update_vcam(dev);
1272 spin_unlock_irq(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001273 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001274}
1275
Jiri Pirko8e586132011-12-08 19:52:37 -05001276static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001277{
1278 struct rhine_private *rp = netdev_priv(dev);
1279
1280 spin_lock_irq(&rp->lock);
1281 clear_bit(vid, rp->active_vlans);
1282 rhine_update_vcam(dev);
1283 spin_unlock_irq(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001284 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001285}
1286
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287static void init_registers(struct net_device *dev)
1288{
1289 struct rhine_private *rp = netdev_priv(dev);
1290 void __iomem *ioaddr = rp->base;
1291 int i;
1292
1293 for (i = 0; i < 6; i++)
1294 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1295
1296 /* Initialize other registers. */
1297 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1298 /* Configure initial FIFO thresholds. */
1299 iowrite8(0x20, ioaddr + TxConfig);
1300 rp->tx_thresh = 0x20;
1301 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1302
1303 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1304 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1305
1306 rhine_set_rx_mode(dev);
1307
Roger Luethi38f49e82010-12-06 00:59:40 +00001308 if (rp->pdev->revision >= VT6105M)
1309 rhine_init_cam_filter(dev);
1310
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001311 napi_enable(&rp->napi);
Stephen Hemmingerab197662006-08-14 23:00:18 -07001312
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 /* Enable interrupts by setting the interrupt mask. */
1314 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1315 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1316 IntrTxDone | IntrTxError | IntrTxUnderrun |
1317 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1318 ioaddr + IntrEnable);
1319
1320 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1321 ioaddr + ChipCmd);
1322 rhine_check_media(dev, 1);
1323}
1324
1325/* Enable MII link status auto-polling (required for IntrLinkChange) */
1326static void rhine_enable_linkmon(void __iomem *ioaddr)
1327{
1328 iowrite8(0, ioaddr + MIICmd);
1329 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1330 iowrite8(0x80, ioaddr + MIICmd);
1331
1332 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1333
1334 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1335}
1336
1337/* Disable MII link status auto-polling (required for MDIO access) */
1338static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1339{
1340 iowrite8(0, ioaddr + MIICmd);
1341
1342 if (quirks & rqRhineI) {
1343 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1344
John W. Linville38bb6b22006-05-19 10:51:21 -04001345 /* Can be called from ISR. Evil. */
1346 mdelay(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
1348 /* 0x80 must be set immediately before turning it off */
1349 iowrite8(0x80, ioaddr + MIICmd);
1350
1351 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1352
1353 /* Heh. Now clear 0x80 again. */
1354 iowrite8(0, ioaddr + MIICmd);
1355 }
1356 else
1357 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1358}
1359
1360/* Read and write over the MII Management Data I/O (MDIO) interface. */
1361
1362static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1363{
1364 struct rhine_private *rp = netdev_priv(dev);
1365 void __iomem *ioaddr = rp->base;
1366 int result;
1367
1368 rhine_disable_linkmon(ioaddr, rp->quirks);
1369
1370 /* rhine_disable_linkmon already cleared MIICmd */
1371 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1372 iowrite8(regnum, ioaddr + MIIRegAddr);
1373 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1374 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1375 result = ioread16(ioaddr + MIIData);
1376
1377 rhine_enable_linkmon(ioaddr);
1378 return result;
1379}
1380
1381static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1382{
1383 struct rhine_private *rp = netdev_priv(dev);
1384 void __iomem *ioaddr = rp->base;
1385
1386 rhine_disable_linkmon(ioaddr, rp->quirks);
1387
1388 /* rhine_disable_linkmon already cleared MIICmd */
1389 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1390 iowrite8(regnum, ioaddr + MIIRegAddr);
1391 iowrite16(value, ioaddr + MIIData);
1392 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1393 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1394
1395 rhine_enable_linkmon(ioaddr);
1396}
1397
1398static int rhine_open(struct net_device *dev)
1399{
1400 struct rhine_private *rp = netdev_priv(dev);
1401 void __iomem *ioaddr = rp->base;
1402 int rc;
1403
Julia Lawall76781382009-11-18 08:23:53 +00001404 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 dev);
1406 if (rc)
1407 return rc;
1408
1409 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001410 netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411
1412 rc = alloc_ring(dev);
1413 if (rc) {
1414 free_irq(rp->pdev->irq, dev);
1415 return rc;
1416 }
1417 alloc_rbufs(dev);
1418 alloc_tbufs(dev);
1419 rhine_chip_reset(dev);
1420 init_registers(dev);
1421 if (debug > 2)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001422 netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
1423 __func__, ioread16(ioaddr + ChipCmd),
1424 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
1426 netif_start_queue(dev);
1427
1428 return 0;
1429}
1430
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001431static void rhine_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432{
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001433 struct rhine_private *rp = container_of(work, struct rhine_private,
1434 reset_task);
1435 struct net_device *dev = rp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
1437 /* protect against concurrent rx interrupts */
1438 disable_irq(rp->pdev->irq);
1439
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001440 napi_disable(&rp->napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001441
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001442 spin_lock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
1444 /* clear all descriptors */
1445 free_tbufs(dev);
1446 free_rbufs(dev);
1447 alloc_tbufs(dev);
1448 alloc_rbufs(dev);
1449
1450 /* Reinitialize the hardware. */
1451 rhine_chip_reset(dev);
1452 init_registers(dev);
1453
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001454 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 enable_irq(rp->pdev->irq);
1456
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001457 dev->trans_start = jiffies; /* prevent tx timeout */
Eric Dumazet553e2332009-05-27 10:34:50 +00001458 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 netif_wake_queue(dev);
1460}
1461
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001462static void rhine_tx_timeout(struct net_device *dev)
1463{
1464 struct rhine_private *rp = netdev_priv(dev);
1465 void __iomem *ioaddr = rp->base;
1466
Joe Perchesdf4511f2011-04-16 14:15:25 +00001467 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1468 ioread16(ioaddr + IntrStatus),
1469 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001470
1471 schedule_work(&rp->reset_task);
1472}
1473
Stephen Hemminger613573252009-08-31 19:50:58 +00001474static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1475 struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476{
1477 struct rhine_private *rp = netdev_priv(dev);
1478 void __iomem *ioaddr = rp->base;
1479 unsigned entry;
Dongdong Deng22580f82009-08-13 19:12:31 +00001480 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482 /* Caution: the write order is important here, set the field
1483 with the "ownership" bits last. */
1484
1485 /* Calculate the next Tx descriptor entry. */
1486 entry = rp->cur_tx % TX_RING_SIZE;
1487
Herbert Xu5b057c62006-06-23 02:06:41 -07001488 if (skb_padto(skb, ETH_ZLEN))
Patrick McHardy6ed10652009-06-23 06:03:08 +00001489 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
1491 rp->tx_skbuff[entry] = skb;
1492
1493 if ((rp->quirks & rqRhineI) &&
Patrick McHardy84fa7932006-08-29 16:44:56 -07001494 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 /* Must use alignment buffer. */
1496 if (skb->len > PKT_BUF_SZ) {
1497 /* packet too long, drop it */
1498 dev_kfree_skb(skb);
1499 rp->tx_skbuff[entry] = NULL;
Eric Dumazet553e2332009-05-27 10:34:50 +00001500 dev->stats.tx_dropped++;
Patrick McHardy6ed10652009-06-23 06:03:08 +00001501 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 }
Craig Brind3e0d1672006-04-27 02:30:46 -07001503
1504 /* Padding is not copied and so must be redone. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
Craig Brind3e0d1672006-04-27 02:30:46 -07001506 if (skb->len < ETH_ZLEN)
1507 memset(rp->tx_buf[entry] + skb->len, 0,
1508 ETH_ZLEN - skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 rp->tx_skbuff_dma[entry] = 0;
1510 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1511 (rp->tx_buf[entry] -
1512 rp->tx_bufs));
1513 } else {
1514 rp->tx_skbuff_dma[entry] =
1515 pci_map_single(rp->pdev, skb->data, skb->len,
1516 PCI_DMA_TODEVICE);
1517 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1518 }
1519
1520 rp->tx_ring[entry].desc_length =
1521 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1522
Roger Luethi38f49e82010-12-06 00:59:40 +00001523 if (unlikely(vlan_tx_tag_present(skb))) {
1524 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1525 /* request tagging */
1526 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1527 }
1528 else
1529 rp->tx_ring[entry].tx_status = 0;
1530
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 /* lock eth irq */
Dongdong Deng22580f82009-08-13 19:12:31 +00001532 spin_lock_irqsave(&rp->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 wmb();
Roger Luethi38f49e82010-12-06 00:59:40 +00001534 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 wmb();
1536
1537 rp->cur_tx++;
1538
1539 /* Non-x86 Todo: explicitly flush cache lines here. */
1540
Roger Luethi38f49e82010-12-06 00:59:40 +00001541 if (vlan_tx_tag_present(skb))
1542 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1543 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 /* Wake the potentially-idle transmit channel */
1546 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1547 ioaddr + ChipCmd1);
1548 IOSYNC;
1549
1550 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1551 netif_stop_queue(dev);
1552
Dongdong Deng22580f82009-08-13 19:12:31 +00001553 spin_unlock_irqrestore(&rp->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
1555 if (debug > 4) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001556 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1557 rp->cur_tx-1, entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
Patrick McHardy6ed10652009-06-23 06:03:08 +00001559 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560}
1561
1562/* The interrupt handler does all of the Rx thread work and cleans up
1563 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +01001564static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
1566 struct net_device *dev = dev_instance;
1567 struct rhine_private *rp = netdev_priv(dev);
1568 void __iomem *ioaddr = rp->base;
1569 u32 intr_status;
1570 int boguscnt = max_interrupt_work;
1571 int handled = 0;
1572
1573 while ((intr_status = get_intr_status(dev))) {
1574 handled = 1;
1575
1576 /* Acknowledge all of the current interrupt sources ASAP. */
1577 if (intr_status & IntrTxDescRace)
1578 iowrite8(0x08, ioaddr + IntrStatus2);
1579 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1580 IOSYNC;
1581
1582 if (debug > 4)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001583 netdev_dbg(dev, "Interrupt, status %08x\n",
1584 intr_status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
1586 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
Roger Luethi633949a2006-08-14 23:00:17 -07001587 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
Roger Luethi633949a2006-08-14 23:00:17 -07001588 iowrite16(IntrTxAborted |
1589 IntrTxDone | IntrTxError | IntrTxUnderrun |
1590 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1591 ioaddr + IntrEnable);
1592
Ben Hutchings288379f2009-01-19 16:43:59 -08001593 napi_schedule(&rp->napi);
Roger Luethi633949a2006-08-14 23:00:17 -07001594 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595
1596 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1597 if (intr_status & IntrTxErrSummary) {
1598 /* Avoid scavenging before Tx engine turned off */
1599 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1600 if (debug > 2 &&
1601 ioread8(ioaddr+ChipCmd) & CmdTxOn)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001602 netdev_warn(dev,
1603 "%s: Tx engine still on\n",
1604 __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 }
1606 rhine_tx(dev);
1607 }
1608
1609 /* Abnormal error summary/uncommon events handlers. */
1610 if (intr_status & (IntrPCIErr | IntrLinkChange |
1611 IntrStatsMax | IntrTxError | IntrTxAborted |
1612 IntrTxUnderrun | IntrTxDescRace))
1613 rhine_error(dev, intr_status);
1614
1615 if (--boguscnt < 0) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001616 netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
1617 intr_status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 break;
1619 }
1620 }
1621
1622 if (debug > 3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001623 netdev_dbg(dev, "exiting interrupt, status=%08x\n",
1624 ioread16(ioaddr + IntrStatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 return IRQ_RETVAL(handled);
1626}
1627
/* This routine is logically part of the interrupt handler, but isolated
   for clarity.  It reclaims completed Tx descriptors: errors and
   collisions are folded into dev->stats, transmitted skbs are unmapped
   and freed, and the queue is woken once enough ring slots are free.
   Runs under rp->lock. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			netdev_dbg(dev, "Tx scavenge %d status %08x\n",
				   entry, txstatus);
		/* Chip still owns this descriptor - stop scavenging here. */
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {	/* transmit failed */
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)	/* carrier lost */
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)	/* late collision (window) */
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)	/* aborted (excess collisions) */
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)	/* heartbeat/SQE failure */
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* Tx FIFO error: give the descriptor back to
				   the chip and keep the skb; the 'abnormal'
				   handler restarts the Tx engine and the same
				   frame is retried. */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Success: collision count field differs per chip. */
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
					   (txstatus >> 3) & 0xF,
					   txstatus & 0xF);
			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			dev->stats.tx_packets++;
		}
		/* Free the original skb. */
		/* Unmap only if this skb was individually DMA-mapped
		   (a zero handle means no mapping to release). */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	/* Restart the queue once there is comfortable headroom again. */
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
1693
Roger Luethi38f49e82010-12-06 00:59:40 +00001694/**
1695 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1696 * @skb: pointer to sk_buff
1697 * @data_size: used data area of the buffer including CRC
1698 *
1699 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1700 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1701 * aligned following the CRC.
1702 */
1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1704{
1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
Harvey Harrison4562b2f2011-03-28 17:08:59 +00001706 return be16_to_cpup((__be16 *)trailer);
Roger Luethi38f49e82010-12-06 00:59:40 +00001707}
1708
/* Process up to limit frames from receive ring.  Returns the number of
   descriptors examined (used for NAPI budget accounting by the caller). */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		netdev_dbg(dev, "%s(), entry %d status %08x\n",
			   __func__, entry,
			   le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;	/* frame length incl. CRC */

		/* Chip still owns this descriptor: nothing more to do. */
		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			netdev_dbg(dev, "%s() status is %08x\n",
				   __func__, desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* Frame spanned multiple Rx buffers: counted
				   as a length error, no skb is delivered. */
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				if (debug > 2)
					netdev_dbg(dev, "%s() Rx error was %08x\n",
						   __func__, desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				/* Copybreak path: copy the small frame into a
				   fresh skb; the mapped ring buffer stays in
				   place and is reused as-is. */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring buffer's skb itself up the
				   stack; the refill loop below allocates and
				   maps a replacement. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}

			/* Pull the hardware-extracted 802.1Q TCI out of the
			   buffer tail before eth_type_trans() adjusts skb. */
			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, vlan_tci);
			netif_receive_skb(skb);
			dev->stats.rx_bytes += pkt_len;
			dev->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		/* Hand the descriptor (back) to the chip. */
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
1841
/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);	/* write-0-to-clear variant */
	ioread16(ioaddr + RxCRCErrs);		/* read-to-clear variant */
	ioread16(ioaddr + RxMissed);
}
1854
1855static void rhine_restart_tx(struct net_device *dev) {
1856 struct rhine_private *rp = netdev_priv(dev);
1857 void __iomem *ioaddr = rp->base;
1858 int entry = rp->dirty_tx % TX_RING_SIZE;
1859 u32 intr_status;
1860
1861 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001862 * If new errors occurred, we need to sort them out before doing Tx.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 * In that case the ISR will be back here RSN anyway.
1864 */
1865 intr_status = get_intr_status(dev);
1866
1867 if ((intr_status & IntrTxErrSummary) == 0) {
1868
1869 /* We know better than the chip where it should continue. */
1870 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1871 ioaddr + TxRingPtr);
1872
1873 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1874 ioaddr + ChipCmd);
Roger Luethi38f49e82010-12-06 00:59:40 +00001875
1876 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1877 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1878 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1879
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1881 ioaddr + ChipCmd1);
1882 IOSYNC;
1883 }
1884 else {
1885 /* This should never happen */
1886 if (debug > 1)
Joe Perchesdf4511f2011-04-16 14:15:25 +00001887 netdev_warn(dev, "%s() Another error occurred %08x\n",
1888 __func__, intr_status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 }
1890
1891}
1892
/*
 * Handle the "abnormal" interrupt causes collected in intr_status:
 * link changes, tally-counter overflow and the various Tx error
 * conditions.  Called from the interrupt handler; serialized on
 * rp->lock.
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Hardware tally counters close to overflow: fold them into
		   dev->stats and reset them. */
		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			netdev_info(dev, "Abort %08x, frame dropped\n",
				    intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Raise the Tx threshold by 0x20 per underrun, capped at
		   0xE0, so transmission starts with more data buffered. */
		if (rp->tx_thresh < 0xE0)
			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
		if (debug > 1)
			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			netdev_info(dev, "Tx descriptor write-back race\n");
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	    IntrTxUnderrun | IntrTxDescRace)) == 0) {
		/* Unspecified Tx error: bump the threshold as well. */
		if (rp->tx_thresh < 0xE0) {
			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
		}
		if (debug > 1)
			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	/* Any Tx-side error leaves the Tx engine stopped - restart it. */
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Whatever is left over was not expected here. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			netdev_err(dev, "Something Wicked happened! %08x\n",
				   intr_status);
	}

	spin_unlock(&rp->lock);
}
1947
/*
 * ndo_get_stats: fold the chip's tally counters into dev->stats and
 * return the software counters.  Takes rp->lock with IRQs disabled
 * because the interrupt path updates dev->stats as well.
 */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	/* Read (and thereby partly clear) the counters before resetting
	   them both ways in clear_tally_counters(). */
	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &dev->stats;
}
1962
/*
 * ndo_set_rx_mode: program the Rx filter according to the interface
 * flags and multicast list - promiscuous, accept-all-multicast,
 * per-address CAM entries (VT6105M and newer) or the 64-bit multicast
 * hash (older chips) - and, on VT6105M+, toggle VLAN receive filtering.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		/* Open the multicast filter completely. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->pdev->revision >= VT6105M) {
		/* Newer chips: program each multicast address into a CAM
		   entry and enable exactly those entries via the mask. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Older chips: 64-bit hash filter keyed on the top 6 bits
		   of the address CRC. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->pdev->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2010
2011static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2012{
2013 struct rhine_private *rp = netdev_priv(dev);
2014
Rick Jones23020ab2011-11-09 09:58:07 +00002015 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2016 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2017 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018}
2019
2020static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2021{
2022 struct rhine_private *rp = netdev_priv(dev);
2023 int rc;
2024
2025 spin_lock_irq(&rp->lock);
2026 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2027 spin_unlock_irq(&rp->lock);
2028
2029 return rc;
2030}
2031
2032static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2033{
2034 struct rhine_private *rp = netdev_priv(dev);
2035 int rc;
2036
2037 spin_lock_irq(&rp->lock);
2038 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2039 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002040 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042 return rc;
2043}
2044
2045static int netdev_nway_reset(struct net_device *dev)
2046{
2047 struct rhine_private *rp = netdev_priv(dev);
2048
2049 return mii_nway_restart(&rp->mii_if);
2050}
2051
2052static u32 netdev_get_link(struct net_device *dev)
2053{
2054 struct rhine_private *rp = netdev_priv(dev);
2055
2056 return mii_link_ok(&rp->mii_if);
2057}
2058
2059static u32 netdev_get_msglevel(struct net_device *dev)
2060{
2061 return debug;
2062}
2063
2064static void netdev_set_msglevel(struct net_device *dev, u32 value)
2065{
2066 debug = value;
2067}
2068
2069static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2070{
2071 struct rhine_private *rp = netdev_priv(dev);
2072
2073 if (!(rp->quirks & rqWOL))
2074 return;
2075
2076 spin_lock_irq(&rp->lock);
2077 wol->supported = WAKE_PHY | WAKE_MAGIC |
2078 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2079 wol->wolopts = rp->wolopts;
2080 spin_unlock_irq(&rp->lock);
2081}
2082
2083static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2084{
2085 struct rhine_private *rp = netdev_priv(dev);
2086 u32 support = WAKE_PHY | WAKE_MAGIC |
2087 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2088
2089 if (!(rp->quirks & rqWOL))
2090 return -EINVAL;
2091
2092 if (wol->wolopts & ~support)
2093 return -EINVAL;
2094
2095 spin_lock_irq(&rp->lock);
2096 rp->wolopts = wol->wolopts;
2097 spin_unlock_irq(&rp->lock);
2098
2099 return 0;
2100}
2101
/* ethtool operations: drvinfo, MII-backed link settings/state,
   message level and Wake-on-LAN configuration. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_wol = rhine_get_wol,
	.set_wol = rhine_set_wol,
};
2113
2114static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2115{
2116 struct rhine_private *rp = netdev_priv(dev);
2117 int rc;
2118
2119 if (!netif_running(dev))
2120 return -EINVAL;
2121
2122 spin_lock_irq(&rp->lock);
2123 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2124 spin_unlock_irq(&rp->lock);
Roger Luethi00b428c2006-03-28 20:53:56 +02002125 rhine_set_carrier(&rp->mii_if);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127 return rc;
2128}
2129
/*
 * ndo_stop: stop NAPI polling and the deferred reset task, quiesce the
 * chip (loopback, interrupts masked, Tx/Rx stopped), then release the
 * IRQ and all descriptor rings / buffers.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	napi_disable(&rp->napi);
	cancel_work_sync(&rp->reset_task);
	netif_stop_queue(dev);

	spin_lock_irq(&rp->lock);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
			   ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	/* Chip is silent now: safe to free the IRQ and all buffers. */
	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2163
2164
/*
 * PCI remove hook: unregister the netdev first so no new I/O can be
 * started, then unmap the registers, release the PCI resources and
 * free the device structure.
 */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2179
/*
 * Arm Wake-on-LAN according to rp->wolopts and, unless avoid_D3 is
 * set, drop the chip into power state D3.  Used as the PCI .shutdown
 * hook and from rhine_suspend().
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}
2226
2227#ifdef CONFIG_PM
/*
 * PCI suspend hook: quiesce NAPI, detach the interface, save PCI state,
 * arm WOL / power down via rhine_shutdown(), then give up the IRQ.
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	napi_disable(&rp->napi);

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
2249
/*
 * PCI resume hook: reclaim the IRQ, bring the chip back to D0, rebuild
 * the Rx/Tx rings from scratch and reprogram the chip registers.
 */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* NOTE(review): a request_irq() failure is only logged; resume
	   continues and the device would then run without an IRQ -
	   confirm whether an error return would be more appropriate. */
	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		netdev_err(dev, "request_irq failed\n");

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		netdev_info(dev, "Entering power state D0 %s (%d)\n",
			    ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Drop and rebuild both rings: their contents are stale after
	   the chip lost power. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
2286#endif /* CONFIG_PM */
2287
/* PCI driver glue: device table, probe/remove, optional suspend/resume
   and a shutdown hook that arms Wake-on-LAN. */
static struct pci_driver rhine_driver = {
	.name = DRV_NAME,
	.id_table = rhine_pci_tbl,
	.probe = rhine_init_one,
	.remove = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend = rhine_suspend,
	.resume = rhine_resume,
#endif /* CONFIG_PM */
	.shutdown = rhine_shutdown,
};
2299
/* Boards whose BIOS fails at PXE boot when the chip is left in D3;
   matched in rhine_init() to force avoid_D3. */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
/* Module entry point: enable the avoid_D3 workaround on known broken
   BIOSes, then register the PCI driver. */
static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	}
	else if (avoid_D3)
		pr_info("avoid_D3 set\n");

	return pci_register_driver(&rhine_driver);
}
2334
2335
/* Module exit point: unregister the PCI driver. */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2340
2341
2342module_init(rhine_init);
2343module_exit(rhine_cleanup);