blob: 13cfcced212ecaa263819efcd7c0097cd9f08f02 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi207070f2013-09-21 14:24:11 +020035#define DRV_VERSION "1.5.1"
Roger Luethi38f49e82010-12-06 00:59:40 +000036#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
Francois Romieufc3e0f82012-01-07 22:39:37 +010042static int debug = 0;
43#define RHINE_MSG_DEFAULT \
44 (0x0000)
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000048#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070051static int rx_copybreak = 1518;
52#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070054#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Roger Luethib933b4d2006-08-14 23:00:21 -070056/* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000058static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070059
/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87
88#include <linux/module.h>
89#include <linux/moduleparam.h>
90#include <linux/kernel.h>
91#include <linux/string.h>
92#include <linux/timer.h>
93#include <linux/errno.h>
94#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <linux/interrupt.h>
96#include <linux/pci.h>
Alexey Charkov2d283862014-04-22 19:28:09 +040097#include <linux/of_address.h>
98#include <linux/of_device.h>
99#include <linux/of_irq.h>
100#include <linux/platform_device.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -0400101#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102#include <linux/netdevice.h>
103#include <linux/etherdevice.h>
104#include <linux/skbuff.h>
105#include <linux/init.h>
106#include <linux/delay.h>
107#include <linux/mii.h>
108#include <linux/ethtool.h>
109#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000110#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800112#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113#include <asm/processor.h> /* Processor type for cache alignment. */
114#include <asm/io.h>
115#include <asm/irq.h>
116#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100117#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
119/* These identify the driver base version and may not be removed. */
Bill Pemberton76e239e2012-12-03 09:23:48 -0500120static const char version[] =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000121 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
123/* This driver was written to use PCI memory space. Some early versions
124 of the Rhine may only work correctly with I/O space accesses. */
125#ifdef CONFIG_VIA_RHINE_MMIO
126#define USE_MMIO
127#else
128#endif
129
130MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
131MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
132MODULE_LICENSE("GPL");
133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134module_param(debug, int, 0);
135module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700136module_param(avoid_D3, bool, 0);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100137MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700139MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140
Roger Luethi38f49e82010-12-06 00:59:40 +0000141#define MCAM_SIZE 32
142#define VCAM_SIZE 32
143
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144/*
145 Theory of Operation
146
147I. Board Compatibility
148
149This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
150controller.
151
152II. Board-specific settings
153
154Boards with this chip are functional only in a bus-master PCI slot.
155
156Many operational settings are loaded from the EEPROM to the Config word at
157offset 0x78. For most of these settings, this driver assumes that they are
158correct.
159If this driver is compiled to use PCI memory space operations the EEPROM
160must be configured to enable memory ops.
161
162III. Driver operation
163
164IIIa. Ring buffers
165
166This driver uses two statically allocated fixed-size descriptor lists
167formed into rings by a branch from the final descriptor to the beginning of
168the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
169
170IIIb/c. Transmit/Receive Structure
171
172This driver attempts to use a zero-copy receive and transmit scheme.
173
174Alas, all data buffers are required to start on a 32 bit boundary, so
175the driver must often copy transmit packets into bounce buffers.
176
177The driver allocates full frame size skbuffs for the Rx ring buffers at
178open() time and passes the skb->data field to the chip as receive data
179buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
180a fresh skbuff is allocated and the frame is copied to the new skbuff.
181When the incoming frame is larger, the skbuff is passed directly up the
182protocol stack. Buffers consumed this way are replaced by newly allocated
183skbuffs in the last phase of rhine_rx().
184
185The RX_COPYBREAK value is chosen to trade-off the memory wasted by
186using a full-sized skbuff for small frames vs. the copying costs of larger
187frames. New boards are typically used in generously configured machines
188and the underfilled buffers have negligible impact compared to the benefit of
189a single allocation size, so the default value of zero results in never
190copying packets. When copying is done, the cost is usually mitigated by using
191a combined copy/checksum routine. Copying also preloads the cache, which is
192most useful with small frames.
193
194Since the VIA chips are only able to transfer data to buffers on 32 bit
195boundaries, the IP header at offset 14 in an ethernet frame isn't
196longword aligned for further processing. Copying these unaligned buffers
197has the beneficial effect of 16-byte aligning the IP header.
198
199IIId. Synchronization
200
201The driver runs as two independent, single-threaded flows of control. One
202is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800203netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
204which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800207netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
208the ring is not available it stops the transmit queue by
209calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210
211The interrupt handler has exclusive control over the Rx ring and records stats
212from the Tx ring. After reaping the stats, it marks the Tx queue entry as
213empty by incrementing the dirty_tx mark. If at least half of the entries in
214the Rx ring are available the transmit queue is woken up if it was stopped.
215
216IV. Notes
217
218IVb. References
219
220Preliminary VT86C100A manual from http://www.via.com.tw/
221http://www.scyld.com/expert/100mbps.html
222http://www.scyld.com/expert/NWay.html
223ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
224ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
225
226
227IVc. Errata
228
229The VT86C100A manual is not reliable information.
230The 3043 chip does not handle unaligned transmit or receive buffers, resulting
231in significant performance degradation for bounce buffer copies on transmit
232and unaligned IP headers on receive.
233The chip does not pad to minimum transmit length.
234
235*/
236
237
238/* This table drives the PCI probe routines. It's mostly boilerplate in all
239 of the drivers, and will likely be provided by some future kernel.
 240 Note the matching code -- the first table entry matches all 56** cards but
241 second only the 1234 card.
242*/
243
/* Chip revision codes read from the hardware. */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};
260
/* Per-chip capability / bug flags stored in rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to 4 byte aligned.
 */

/* Beware of PCI posted writes: a read flushes them out. */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
278
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000279static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400280 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
281 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
282 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
283 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 { } /* terminate list */
285};
286MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
287
Alexey Charkov2d283862014-04-22 19:28:09 +0400288/* OpenFirmware identifiers for platform-bus devices
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400289 * The .data field is currently only used to store quirks
Alexey Charkov2d283862014-04-22 19:28:09 +0400290 */
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400291static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
Alexey Charkov2d283862014-04-22 19:28:09 +0400292static struct of_device_id rhine_of_tbl[] = {
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400293 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
Alexey Charkov2d283862014-04-22 19:28:09 +0400294 { } /* terminate list */
295};
296MODULE_DEVICE_TABLE(of, rhine_of_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
315
/* Bits in ConfigD (backoff algorithm selection). */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
321
/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};
333
/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};
341
/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};
355
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif
363
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};
386
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
395
396/* The Rx and Tx buffer descriptors. */
397struct rx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400398 __le32 rx_status;
399 __le32 desc_length; /* Chain flag, Buffer/frame length */
400 __le32 addr;
401 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402};
403struct tx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400404 __le32 tx_status;
405 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
406 __le32 addr;
407 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408};
409
/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000	/* descriptor belongs to the chip */
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000	/* frame carries a VLAN tag */
};
426
/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
434
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000435struct rhine_stats {
436 u64 packets;
437 u64 bytes;
438 struct u64_stats_sync syncp;
439};
440
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441struct rhine_private {
Roger Luethi38f49e82010-12-06 00:59:40 +0000442 /* Bit mask for configured VLAN ids */
443 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
444
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445 /* Descriptor rings */
446 struct rx_desc *rx_ring;
447 struct tx_desc *tx_ring;
448 dma_addr_t rx_ring_dma;
449 dma_addr_t tx_ring_dma;
450
451 /* The addresses of receive-in-place skbuffs. */
452 struct sk_buff *rx_skbuff[RX_RING_SIZE];
453 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
454
455 /* The saved address of a sent-in-place packet/buffer, for later free(). */
456 struct sk_buff *tx_skbuff[TX_RING_SIZE];
457 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
458
Roger Luethi4be5de22006-04-04 20:49:16 +0200459 /* Tx bounce buffers (Rhine-I only) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460 unsigned char *tx_buf[TX_RING_SIZE];
461 unsigned char *tx_bufs;
462 dma_addr_t tx_bufs_dma;
463
Alexey Charkovf7630d12014-04-22 19:28:08 +0400464 int irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 long pioaddr;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700466 struct net_device *dev;
467 struct napi_struct napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468 spinlock_t lock;
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100469 struct mutex task_lock;
470 bool task_enable;
471 struct work_struct slow_event_task;
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800472 struct work_struct reset_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473
Francois Romieufc3e0f82012-01-07 22:39:37 +0100474 u32 msg_enable;
475
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476 /* Frequently used values: keep some adjacent for cache effect. */
477 u32 quirks;
478 struct rx_desc *rx_head_desc;
479 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
480 unsigned int cur_tx, dirty_tx;
481 unsigned int rx_buf_sz; /* Based on MTU+slack. */
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000482 struct rhine_stats rx_stats;
483 struct rhine_stats tx_stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 u8 wolopts;
485
486 u8 tx_thresh, rx_thresh;
487
488 struct mii_if_info mii_if;
489 void __iomem *base;
490};
491
/*
 * Read-modify-write helpers for 8/16/32-bit device registers:
 * *_BITS_ON sets the bits in x, *_BITS_OFF clears them, *_BITS_SET
 * replaces the field selected by mask m with x, *_BITS_IS_ON tests x.
 */
#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
508
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509static int mdio_read(struct net_device *dev, int phy_id, int location);
510static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
511static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800512static void rhine_reset_task(struct work_struct *work);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100513static void rhine_slow_event_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000515static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
516 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100517static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700519static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520static void rhine_set_rx_mode(struct net_device *dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000521static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
522 struct rtnl_link_stats64 *stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400524static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525static int rhine_close(struct net_device *dev);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000526static int rhine_vlan_rx_add_vid(struct net_device *dev,
527 __be16 proto, u16 vid);
528static int rhine_vlan_rx_kill_vid(struct net_device *dev,
529 __be16 proto, u16 vid);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100530static void rhine_restart_tx(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000532static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
Francois Romieua384a332012-01-07 22:19:36 +0100533{
534 void __iomem *ioaddr = rp->base;
535 int i;
536
537 for (i = 0; i < 1024; i++) {
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000538 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
539
540 if (low ^ has_mask_bits)
Francois Romieua384a332012-01-07 22:19:36 +0100541 break;
542 udelay(10);
543 }
544 if (i > 64) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100545 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000546 "count: %04d\n", low ? "low" : "high", reg, mask, i);
Francois Romieua384a332012-01-07 22:19:36 +0100547 }
548}
549
550static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
551{
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000552 rhine_wait_bit(rp, reg, mask, false);
Francois Romieua384a332012-01-07 22:19:36 +0100553}
554
555static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
556{
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000557 rhine_wait_bit(rp, reg, mask, true);
Francois Romieua384a332012-01-07 22:19:36 +0100558}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
Francois Romieua20a28b2011-12-30 14:53:58 +0100560static u32 rhine_get_events(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 void __iomem *ioaddr = rp->base;
563 u32 intr_status;
564
565 intr_status = ioread16(ioaddr + IntrStatus);
566 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
567 if (rp->quirks & rqStatusWBRace)
568 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
569 return intr_status;
570}
571
Francois Romieua20a28b2011-12-30 14:53:58 +0100572static void rhine_ack_events(struct rhine_private *rp, u32 mask)
573{
574 void __iomem *ioaddr = rp->base;
575
576 if (rp->quirks & rqStatusWBRace)
577 iowrite8(mask >> 16, ioaddr + IntrStatus2);
578 iowrite16(mask, ioaddr + IntrStatus);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100579 mmiowb();
Francois Romieua20a28b2011-12-30 14:53:58 +0100580}
581
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582/*
583 * Get power related registers into sane state.
584 * Notify user about past WOL event.
585 */
586static void rhine_power_init(struct net_device *dev)
587{
588 struct rhine_private *rp = netdev_priv(dev);
589 void __iomem *ioaddr = rp->base;
590 u16 wolstat;
591
592 if (rp->quirks & rqWOL) {
593 /* Make sure chip is in power state D0 */
594 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
595
596 /* Disable "force PME-enable" */
597 iowrite8(0x80, ioaddr + WOLcgClr);
598
599 /* Clear power-event config bits (WOL) */
600 iowrite8(0xFF, ioaddr + WOLcrClr);
601 /* More recent cards can manage two additional patterns */
602 if (rp->quirks & rq6patterns)
603 iowrite8(0x03, ioaddr + WOLcrClr1);
604
605 /* Save power-event status bits */
606 wolstat = ioread8(ioaddr + PwrcsrSet);
607 if (rp->quirks & rq6patterns)
608 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
609
610 /* Clear power-event status bits */
611 iowrite8(0xFF, ioaddr + PwrcsrClr);
612 if (rp->quirks & rq6patterns)
613 iowrite8(0x03, ioaddr + PwrcsrClr1);
614
615 if (wolstat) {
616 char *reason;
617 switch (wolstat) {
618 case WOLmagic:
619 reason = "Magic packet";
620 break;
621 case WOLlnkon:
622 reason = "Link went up";
623 break;
624 case WOLlnkoff:
625 reason = "Link went down";
626 break;
627 case WOLucast:
628 reason = "Unicast packet";
629 break;
630 case WOLbmcast:
631 reason = "Multicast/broadcast packet";
632 break;
633 default:
634 reason = "Unknown";
635 }
Joe Perchesdf4511f2011-04-16 14:15:25 +0000636 netdev_info(dev, "Woke system up. Reason: %s\n",
637 reason);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638 }
639 }
640}
641
642static void rhine_chip_reset(struct net_device *dev)
643{
644 struct rhine_private *rp = netdev_priv(dev);
645 void __iomem *ioaddr = rp->base;
Francois Romieufc3e0f82012-01-07 22:39:37 +0100646 u8 cmd1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647
648 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
649 IOSYNC;
650
651 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000652 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653
654 /* Force reset */
655 if (rp->quirks & rqForceReset)
656 iowrite8(0x40, ioaddr + MiscCmd);
657
658 /* Reset can take somewhat longer (rare) */
Francois Romieua384a332012-01-07 22:19:36 +0100659 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 }
661
Francois Romieufc3e0f82012-01-07 22:39:37 +0100662 cmd1 = ioread8(ioaddr + ChipCmd1);
663 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
664 "failed" : "succeeded");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665}
666
#ifdef USE_MMIO
/* Turn on memory-mapped I/O access; the enable bit differs on Rhine-I. */
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif
681
682/*
683 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
684 * (plus 0x6C for Rhine-I/II)
685 */
Bill Pemberton76e239e2012-12-03 09:23:48 -0500686static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687{
688 struct rhine_private *rp = netdev_priv(dev);
689 void __iomem *ioaddr = rp->base;
Francois Romieua384a332012-01-07 22:19:36 +0100690 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691
692 outb(0x20, pioaddr + MACRegEEcsr);
Francois Romieua384a332012-01-07 22:19:36 +0100693 for (i = 0; i < 1024; i++) {
694 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
695 break;
696 }
697 if (i > 512)
698 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699
700#ifdef USE_MMIO
701 /*
702 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
703 * MMIO. If reloading EEPROM was done first this could be avoided, but
704 * it is not known if that still works with the "win98-reboot" problem.
705 */
706 enable_mmio(pioaddr, rp->quirks);
707#endif
708
709 /* Turn off EEPROM-controlled wake-up (magic packet) */
710 if (rp->quirks & rqWOL)
711 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
712
713}
714
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with the IRQ masked. */
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
726
Francois Romieu269f3112011-12-30 14:43:54 +0100727static void rhine_kick_tx_threshold(struct rhine_private *rp)
728{
729 if (rp->tx_thresh < 0xe0) {
730 void __iomem *ioaddr = rp->base;
731
732 rp->tx_thresh += 0x20;
733 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
734 }
735}
736
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100737static void rhine_tx_err(struct rhine_private *rp, u32 status)
738{
739 struct net_device *dev = rp->dev;
740
741 if (status & IntrTxAborted) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100742 netif_info(rp, tx_err, dev,
743 "Abort %08x, frame dropped\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100744 }
745
746 if (status & IntrTxUnderrun) {
747 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100748 netif_info(rp, tx_err ,dev, "Transmitter underrun, "
749 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100750 }
751
Francois Romieufc3e0f82012-01-07 22:39:37 +0100752 if (status & IntrTxDescRace)
753 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100754
755 if ((status & IntrTxError) &&
756 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
757 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100758 netif_info(rp, tx_err, dev, "Unspecified error. "
759 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100760 }
761
762 rhine_restart_tx(dev);
763}
764
765static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
766{
767 void __iomem *ioaddr = rp->base;
768 struct net_device_stats *stats = &rp->dev->stats;
769
770 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
771 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
772
773 /*
774 * Clears the "tally counters" for CRC errors and missed frames(?).
775 * It has been reported that some chips need a write of 0 to clear
776 * these, for others the counters are set to 1 when written to and
777 * instead cleared when read. So we clear them both ways ...
778 */
779 iowrite32(0, ioaddr + RxMissed);
780 ioread16(ioaddr + RxCRCErrs);
781 ioread16(ioaddr + RxMissed);
782}
783
/* Interrupt sources handled inside the NAPI poll loop: all Rx events. */
#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow	| \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

/* Tx error conditions that trigger rhine_tx_err() from NAPI context. */
#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

/* Everything serviced in rhine_napipoll(), including stats overflow. */
#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
/* Rare events deferred to the slow_event_task work item. */
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
803
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700804static int rhine_napipoll(struct napi_struct *napi, int budget)
Roger Luethi633949a2006-08-14 23:00:17 -0700805{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700806 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
807 struct net_device *dev = rp->dev;
Roger Luethi633949a2006-08-14 23:00:17 -0700808 void __iomem *ioaddr = rp->base;
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100809 u16 enable_mask = RHINE_EVENT & 0xffff;
810 int work_done = 0;
811 u32 status;
Roger Luethi633949a2006-08-14 23:00:17 -0700812
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100813 status = rhine_get_events(rp);
814 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
815
816 if (status & RHINE_EVENT_NAPI_RX)
817 work_done += rhine_rx(dev, budget);
818
819 if (status & RHINE_EVENT_NAPI_TX) {
820 if (status & RHINE_EVENT_NAPI_TX_ERR) {
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100821 /* Avoid scavenging before Tx engine turned off */
Francois Romieua384a332012-01-07 22:19:36 +0100822 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100823 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
824 netif_warn(rp, tx_err, dev, "Tx still on\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100825 }
Francois Romieufc3e0f82012-01-07 22:39:37 +0100826
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100827 rhine_tx(dev);
828
829 if (status & RHINE_EVENT_NAPI_TX_ERR)
830 rhine_tx_err(rp, status);
831 }
832
833 if (status & IntrStatsMax) {
834 spin_lock(&rp->lock);
835 rhine_update_rx_crc_and_missed_errord(rp);
836 spin_unlock(&rp->lock);
837 }
838
839 if (status & RHINE_EVENT_SLOW) {
840 enable_mask &= ~RHINE_EVENT_SLOW;
841 schedule_work(&rp->slow_event_task);
842 }
Roger Luethi633949a2006-08-14 23:00:17 -0700843
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700844 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -0800845 napi_complete(napi);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100846 iowrite16(enable_mask, ioaddr + IntrEnable);
847 mmiowb();
Roger Luethi633949a2006-08-14 23:00:17 -0700848 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700849 return work_done;
Roger Luethi633949a2006-08-14 23:00:17 -0700850}
Roger Luethi633949a2006-08-14 23:00:17 -0700851
Bill Pemberton76e239e2012-12-03 09:23:48 -0500852static void rhine_hw_init(struct net_device *dev, long pioaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853{
854 struct rhine_private *rp = netdev_priv(dev);
855
856 /* Reset the chip to erase previous misconfiguration. */
857 rhine_chip_reset(dev);
858
859 /* Rhine-I needs extra time to recuperate before EEPROM reload */
860 if (rp->quirks & rqRhineI)
861 msleep(5);
862
863 /* Reload EEPROM controlled bytes cleared by soft reset */
Alexey Charkov2d283862014-04-22 19:28:09 +0400864 if (dev_is_pci(dev->dev.parent))
865 rhine_reload_eeprom(pioaddr, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866}
867
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800868static const struct net_device_ops rhine_netdev_ops = {
869 .ndo_open = rhine_open,
870 .ndo_stop = rhine_close,
871 .ndo_start_xmit = rhine_start_tx,
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000872 .ndo_get_stats64 = rhine_get_stats64,
Jiri Pirkoafc4b132011-08-16 06:29:01 +0000873 .ndo_set_rx_mode = rhine_set_rx_mode,
Ben Hutchings635ecaa2009-07-09 17:59:01 +0000874 .ndo_change_mtu = eth_change_mtu,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800875 .ndo_validate_addr = eth_validate_addr,
Stephen Hemmingerfe96aaa2009-01-09 11:13:14 +0000876 .ndo_set_mac_address = eth_mac_addr,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800877 .ndo_do_ioctl = netdev_ioctl,
878 .ndo_tx_timeout = rhine_tx_timeout,
Roger Luethi38f49e82010-12-06 00:59:40 +0000879 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
880 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800881#ifdef CONFIG_NET_POLL_CONTROLLER
882 .ndo_poll_controller = rhine_poll,
883#endif
884};
885
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400886static int rhine_init_one_common(struct device *hwdev, u32 quirks,
Alexey Charkov2d283862014-04-22 19:28:09 +0400887 long pioaddr, void __iomem *ioaddr, int irq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888{
889 struct net_device *dev;
890 struct rhine_private *rp;
Alexey Charkov2d283862014-04-22 19:28:09 +0400891 int i, rc, phy_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893
894 /* this should always be supported */
Alexey Charkovf7630d12014-04-22 19:28:08 +0400895 rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 if (rc) {
Alexey Charkovf7630d12014-04-22 19:28:08 +0400897 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
Alexey Charkov2d283862014-04-22 19:28:09 +0400898 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 }
900
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 dev = alloc_etherdev(sizeof(struct rhine_private));
902 if (!dev) {
903 rc = -ENOMEM;
Alexey Charkov2d283862014-04-22 19:28:09 +0400904 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 }
Alexey Charkovf7630d12014-04-22 19:28:08 +0400906 SET_NETDEV_DEV(dev, hwdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907
908 rp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700909 rp->dev = dev;
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400910 rp->quirks = quirks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 rp->pioaddr = pioaddr;
Alexey Charkov2d283862014-04-22 19:28:09 +0400912 rp->base = ioaddr;
913 rp->irq = irq;
Francois Romieufc3e0f82012-01-07 22:39:37 +0100914 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400916 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917
John Stultz827da442013-10-07 15:51:58 -0700918 u64_stats_init(&rp->tx_stats.syncp);
919 u64_stats_init(&rp->rx_stats.syncp);
920
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921 /* Get chip registers into a sane state */
922 rhine_power_init(dev);
923 rhine_hw_init(dev, pioaddr);
924
925 for (i = 0; i < 6; i++)
926 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
927
Joe Perches482e3fe2011-04-16 14:15:26 +0000928 if (!is_valid_ether_addr(dev->dev_addr)) {
929 /* Report it and use a random ethernet address instead */
930 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
Danny Kukawkaf2cedb62012-02-15 06:45:39 +0000931 eth_hw_addr_random(dev);
Joe Perches482e3fe2011-04-16 14:15:26 +0000932 netdev_info(dev, "Using random MAC address: %pM\n",
933 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934 }
935
936 /* For Rhine-I/II, phy_id is loaded from EEPROM */
937 if (!phy_id)
938 phy_id = ioread8(ioaddr + 0x6C);
939
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 spin_lock_init(&rp->lock);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100941 mutex_init(&rp->task_lock);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800942 INIT_WORK(&rp->reset_task, rhine_reset_task);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100943 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800944
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 rp->mii_if.dev = dev;
946 rp->mii_if.mdio_read = mdio_read;
947 rp->mii_if.mdio_write = mdio_write;
948 rp->mii_if.phy_id_mask = 0x1f;
949 rp->mii_if.reg_num_mask = 0x1f;
950
951 /* The chip-specific entries in the device structure. */
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800952 dev->netdev_ops = &rhine_netdev_ops;
wangweidonge76070f2014-03-17 15:52:17 +0800953 dev->ethtool_ops = &netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 dev->watchdog_timeo = TX_TIMEOUT;
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800955
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700956 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
Francois Romieu32b0f532008-07-11 00:30:14 +0200957
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 if (rp->quirks & rqRhineI)
959 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
960
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400961 if (rp->quirks & rqMgmt)
Patrick McHardyf6469682013-04-19 02:04:27 +0000962 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
963 NETIF_F_HW_VLAN_CTAG_RX |
964 NETIF_F_HW_VLAN_CTAG_FILTER;
Roger Luethi38f49e82010-12-06 00:59:40 +0000965
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 /* dev->name not defined before register_netdev()! */
967 rc = register_netdev(dev);
968 if (rc)
Alexey Charkov2d283862014-04-22 19:28:09 +0400969 goto err_out_free_netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400971 if (rp->quirks & rqRhineI)
972 name = "Rhine";
973 else if (rp->quirks & rqStatusWBRace)
974 name = "Rhine II";
975 else if (rp->quirks & rqMgmt)
976 name = "Rhine III (Management Adapter)";
977 else
978 name = "Rhine III";
979
Joe Perchesdf4511f2011-04-16 14:15:25 +0000980 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
Alexey Charkov2d283862014-04-22 19:28:09 +0400981 name, (long)ioaddr, dev->dev_addr, rp->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982
Alexey Charkovf7630d12014-04-22 19:28:08 +0400983 dev_set_drvdata(hwdev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984
985 {
986 u16 mii_cmd;
987 int mii_status = mdio_read(dev, phy_id, 1);
988 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
989 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
990 if (mii_status != 0xffff && mii_status != 0x0000) {
991 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
Joe Perchesdf4511f2011-04-16 14:15:25 +0000992 netdev_info(dev,
993 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
994 phy_id,
995 mii_status, rp->mii_if.advertising,
996 mdio_read(dev, phy_id, 5));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997
998 /* set IFF_RUNNING */
999 if (mii_status & BMSR_LSTATUS)
1000 netif_carrier_on(dev);
1001 else
1002 netif_carrier_off(dev);
1003
1004 }
1005 }
1006 rp->mii_if.phy_id = phy_id;
Francois Romieufc3e0f82012-01-07 22:39:37 +01001007 if (avoid_D3)
1008 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009
1010 return 0;
1011
Alexey Charkov2d283862014-04-22 19:28:09 +04001012err_out_free_netdev:
1013 free_netdev(dev);
1014err_out:
1015 return rc;
1016}
1017
/*
 * PCI probe: enable the device, derive quirk flags from the PCI
 * revision, map the register window (PIO or MMIO depending on
 * USE_MMIO) and hand off to rhine_init_one_common().
 */
static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int i, rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	/* Rhine-I exposes a 128-byte register window, later chips 256. */
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
	u32 quirks;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* Quirk flags keyed off the PCI revision ID. */
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);

		if (a != b) {
			rc = -EIO;
			dev_err(hwdev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

	/* Common init failed: fall through the full unwind chain. */
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
1117
Alexey Charkov2d283862014-04-22 19:28:09 +04001118static int rhine_init_one_platform(struct platform_device *pdev)
1119{
1120 const struct of_device_id *match;
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001121 const u32 *quirks;
Alexey Charkov2d283862014-04-22 19:28:09 +04001122 int irq;
1123 struct resource *res;
1124 void __iomem *ioaddr;
1125
1126 match = of_match_device(rhine_of_tbl, &pdev->dev);
1127 if (!match)
1128 return -EINVAL;
1129
1130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1131 ioaddr = devm_ioremap_resource(&pdev->dev, res);
1132 if (IS_ERR(ioaddr))
1133 return PTR_ERR(ioaddr);
1134
1135 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1136 if (!irq)
1137 return -EINVAL;
1138
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001139 quirks = match->data;
1140 if (!quirks)
Alexey Charkov2d283862014-04-22 19:28:09 +04001141 return -EINVAL;
1142
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001143 return rhine_init_one_common(&pdev->dev, *quirks,
Alexey Charkov2d283862014-04-22 19:28:09 +04001144 (long)ioaddr, ioaddr, irq);
1145}
1146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147static int alloc_ring(struct net_device* dev)
1148{
1149 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001150 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 void *ring;
1152 dma_addr_t ring_dma;
1153
Alexey Charkovf7630d12014-04-22 19:28:08 +04001154 ring = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001155 RX_RING_SIZE * sizeof(struct rx_desc) +
1156 TX_RING_SIZE * sizeof(struct tx_desc),
1157 &ring_dma,
1158 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 if (!ring) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001160 netdev_err(dev, "Could not allocate DMA memory\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 return -ENOMEM;
1162 }
1163 if (rp->quirks & rqRhineI) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001164 rp->tx_bufs = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001165 PKT_BUF_SZ * TX_RING_SIZE,
1166 &rp->tx_bufs_dma,
1167 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 if (rp->tx_bufs == NULL) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001169 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001170 RX_RING_SIZE * sizeof(struct rx_desc) +
1171 TX_RING_SIZE * sizeof(struct tx_desc),
1172 ring, ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 return -ENOMEM;
1174 }
1175 }
1176
1177 rp->rx_ring = ring;
1178 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1179 rp->rx_ring_dma = ring_dma;
1180 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1181
1182 return 0;
1183}
1184
1185static void free_ring(struct net_device* dev)
1186{
1187 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001188 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189
Alexey Charkovf7630d12014-04-22 19:28:08 +04001190 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001191 RX_RING_SIZE * sizeof(struct rx_desc) +
1192 TX_RING_SIZE * sizeof(struct tx_desc),
1193 rp->rx_ring, rp->rx_ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 rp->tx_ring = NULL;
1195
1196 if (rp->tx_bufs)
Alexey Charkovf7630d12014-04-22 19:28:08 +04001197 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001198 rp->tx_bufs, rp->tx_bufs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199
1200 rp->tx_bufs = NULL;
1201
1202}
1203
1204static void alloc_rbufs(struct net_device *dev)
1205{
1206 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001207 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 dma_addr_t next;
1209 int i;
1210
1211 rp->dirty_rx = rp->cur_rx = 0;
1212
1213 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1214 rp->rx_head_desc = &rp->rx_ring[0];
1215 next = rp->rx_ring_dma;
1216
1217 /* Init the ring entries */
1218 for (i = 0; i < RX_RING_SIZE; i++) {
1219 rp->rx_ring[i].rx_status = 0;
1220 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1221 next += sizeof(struct rx_desc);
1222 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1223 rp->rx_skbuff[i] = NULL;
1224 }
1225 /* Mark the last entry as wrapping the ring. */
1226 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1227
1228 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1229 for (i = 0; i < RX_RING_SIZE; i++) {
Kevin Lob26b5552008-08-27 11:35:09 +08001230 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 rp->rx_skbuff[i] = skb;
1232 if (skb == NULL)
1233 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
1235 rp->rx_skbuff_dma[i] =
Alexey Charkovf7630d12014-04-22 19:28:08 +04001236 dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001237 DMA_FROM_DEVICE);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001238 if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
Neil Horman9b4fe5f2013-07-12 13:35:33 -04001239 rp->rx_skbuff_dma[i] = 0;
1240 dev_kfree_skb(skb);
1241 break;
1242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1244 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1245 }
1246 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1247}
1248
1249static void free_rbufs(struct net_device* dev)
1250{
1251 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001252 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 int i;
1254
1255 /* Free all the skbuffs in the Rx queue. */
1256 for (i = 0; i < RX_RING_SIZE; i++) {
1257 rp->rx_ring[i].rx_status = 0;
1258 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1259 if (rp->rx_skbuff[i]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001260 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 rp->rx_skbuff_dma[i],
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001262 rp->rx_buf_sz, DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 dev_kfree_skb(rp->rx_skbuff[i]);
1264 }
1265 rp->rx_skbuff[i] = NULL;
1266 }
1267}
1268
1269static void alloc_tbufs(struct net_device* dev)
1270{
1271 struct rhine_private *rp = netdev_priv(dev);
1272 dma_addr_t next;
1273 int i;
1274
1275 rp->dirty_tx = rp->cur_tx = 0;
1276 next = rp->tx_ring_dma;
1277 for (i = 0; i < TX_RING_SIZE; i++) {
1278 rp->tx_skbuff[i] = NULL;
1279 rp->tx_ring[i].tx_status = 0;
1280 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1281 next += sizeof(struct tx_desc);
1282 rp->tx_ring[i].next_desc = cpu_to_le32(next);
Roger Luethi4be5de22006-04-04 20:49:16 +02001283 if (rp->quirks & rqRhineI)
1284 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 }
1286 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1287
1288}
1289
1290static void free_tbufs(struct net_device* dev)
1291{
1292 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001293 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 int i;
1295
1296 for (i = 0; i < TX_RING_SIZE; i++) {
1297 rp->tx_ring[i].tx_status = 0;
1298 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1299 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1300 if (rp->tx_skbuff[i]) {
1301 if (rp->tx_skbuff_dma[i]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001302 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 rp->tx_skbuff_dma[i],
1304 rp->tx_skbuff[i]->len,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001305 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 }
1307 dev_kfree_skb(rp->tx_skbuff[i]);
1308 }
1309 rp->tx_skbuff[i] = NULL;
1310 rp->tx_buf[i] = NULL;
1311 }
1312}
1313
1314static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1315{
1316 struct rhine_private *rp = netdev_priv(dev);
1317 void __iomem *ioaddr = rp->base;
1318
Francois Romieufc3e0f82012-01-07 22:39:37 +01001319 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321 if (rp->mii_if.full_duplex)
1322 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1323 ioaddr + ChipCmd1);
1324 else
1325 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1326 ioaddr + ChipCmd1);
Francois Romieufc3e0f82012-01-07 22:39:37 +01001327
1328 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1329 rp->mii_if.force_media, netif_carrier_ok(dev));
Roger Luethi00b428c2006-03-28 20:53:56 +02001330}
1331
1332/* Called after status of force_media possibly changed */
Adrian Bunk0761be42006-04-10 23:22:21 -07001333static void rhine_set_carrier(struct mii_if_info *mii)
Roger Luethi00b428c2006-03-28 20:53:56 +02001334{
Francois Romieufc3e0f82012-01-07 22:39:37 +01001335 struct net_device *dev = mii->dev;
1336 struct rhine_private *rp = netdev_priv(dev);
1337
Roger Luethi00b428c2006-03-28 20:53:56 +02001338 if (mii->force_media) {
1339 /* autoneg is off: Link is always assumed to be up */
Francois Romieufc3e0f82012-01-07 22:39:37 +01001340 if (!netif_carrier_ok(dev))
1341 netif_carrier_on(dev);
1342 } else /* Let MMI library update carrier status */
1343 rhine_check_media(dev, 0);
1344
1345 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1346 mii->force_media, netif_carrier_ok(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347}
1348
Roger Luethi38f49e82010-12-06 00:59:40 +00001349/**
1350 * rhine_set_cam - set CAM multicast filters
1351 * @ioaddr: register block of this Rhine
1352 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1353 * @addr: multicast address (6 bytes)
1354 *
1355 * Load addresses into multicast filters.
1356 */
1357static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1358{
1359 int i;
1360
1361 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1362 wmb();
1363
1364 /* Paranoid -- idx out of range should never happen */
1365 idx &= (MCAM_SIZE - 1);
1366
1367 iowrite8((u8) idx, ioaddr + CamAddr);
1368
1369 for (i = 0; i < 6; i++, addr++)
1370 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1371 udelay(10);
1372 wmb();
1373
1374 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1375 udelay(10);
1376
1377 iowrite8(0, ioaddr + CamCon);
1378}
1379
1380/**
1381 * rhine_set_vlan_cam - set CAM VLAN filters
1382 * @ioaddr: register block of this Rhine
1383 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1384 * @addr: VLAN ID (2 bytes)
1385 *
1386 * Load addresses into VLAN filters.
1387 */
1388static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1389{
1390 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1391 wmb();
1392
1393 /* Paranoid -- idx out of range should never happen */
1394 idx &= (VCAM_SIZE - 1);
1395
1396 iowrite8((u8) idx, ioaddr + CamAddr);
1397
1398 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1399 udelay(10);
1400 wmb();
1401
1402 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1403 udelay(10);
1404
1405 iowrite8(0, ioaddr + CamCon);
1406}
1407
1408/**
1409 * rhine_set_cam_mask - set multicast CAM mask
1410 * @ioaddr: register block of this Rhine
1411 * @mask: multicast CAM mask
1412 *
1413 * Mask sets multicast filters active/inactive.
1414 */
1415static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1416{
1417 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1418 wmb();
1419
1420 /* write mask */
1421 iowrite32(mask, ioaddr + CamMask);
1422
1423 /* disable CAMEN */
1424 iowrite8(0, ioaddr + CamCon);
1425}
1426
1427/**
1428 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1429 * @ioaddr: register block of this Rhine
1430 * @mask: VLAN CAM mask
1431 *
1432 * Mask sets VLAN filters active/inactive.
1433 */
1434static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1435{
1436 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1437 wmb();
1438
1439 /* write mask */
1440 iowrite32(mask, ioaddr + CamMask);
1441
1442 /* disable CAMEN */
1443 iowrite8(0, ioaddr + CamCon);
1444}
1445
1446/**
1447 * rhine_init_cam_filter - initialize CAM filters
1448 * @dev: network device
1449 *
1450 * Initialize (disable) hardware VLAN and multicast support on this
1451 * Rhine.
1452 */
1453static void rhine_init_cam_filter(struct net_device *dev)
1454{
1455 struct rhine_private *rp = netdev_priv(dev);
1456 void __iomem *ioaddr = rp->base;
1457
1458 /* Disable all CAMs */
1459 rhine_set_vlan_cam_mask(ioaddr, 0);
1460 rhine_set_cam_mask(ioaddr, 0);
1461
1462 /* disable hardware VLAN support */
1463 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1464 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1465}
1466
1467/**
1468 * rhine_update_vcam - update VLAN CAM filters
1469 * @rp: rhine_private data of this Rhine
1470 *
1471 * Update VLAN CAM filters to match configuration change.
1472 */
1473static void rhine_update_vcam(struct net_device *dev)
1474{
1475 struct rhine_private *rp = netdev_priv(dev);
1476 void __iomem *ioaddr = rp->base;
1477 u16 vid;
1478 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1479 unsigned int i = 0;
1480
1481 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1482 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1483 vCAMmask |= 1 << i;
1484 if (++i >= VCAM_SIZE)
1485 break;
1486 }
1487 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1488}
1489
Patrick McHardy80d5c362013-04-19 02:04:28 +00001490static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001491{
1492 struct rhine_private *rp = netdev_priv(dev);
1493
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001494 spin_lock_bh(&rp->lock);
Roger Luethi38f49e82010-12-06 00:59:40 +00001495 set_bit(vid, rp->active_vlans);
1496 rhine_update_vcam(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001497 spin_unlock_bh(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001498 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001499}
1500
Patrick McHardy80d5c362013-04-19 02:04:28 +00001501static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001502{
1503 struct rhine_private *rp = netdev_priv(dev);
1504
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001505 spin_lock_bh(&rp->lock);
Roger Luethi38f49e82010-12-06 00:59:40 +00001506 clear_bit(vid, rp->active_vlans);
1507 rhine_update_vcam(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001508 spin_unlock_bh(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001509 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001510}
1511
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512static void init_registers(struct net_device *dev)
1513{
1514 struct rhine_private *rp = netdev_priv(dev);
1515 void __iomem *ioaddr = rp->base;
1516 int i;
1517
1518 for (i = 0; i < 6; i++)
1519 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1520
1521 /* Initialize other registers. */
1522 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1523 /* Configure initial FIFO thresholds. */
1524 iowrite8(0x20, ioaddr + TxConfig);
1525 rp->tx_thresh = 0x20;
1526 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1527
1528 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1529 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1530
1531 rhine_set_rx_mode(dev);
1532
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001533 if (rp->quirks & rqMgmt)
Roger Luethi38f49e82010-12-06 00:59:40 +00001534 rhine_init_cam_filter(dev);
1535
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001536 napi_enable(&rp->napi);
Stephen Hemmingerab197662006-08-14 23:00:18 -07001537
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001538 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
1540 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1541 ioaddr + ChipCmd);
1542 rhine_check_media(dev, 1);
1543}
1544
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	/* Stop any MII command, point at BMSR and start a read. */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	/* Re-arm BMSR polling (0x40 selects auto-poll mode). */
	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1558
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs an extra dance to actually stop the poller. */
	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		/* Later chips: just wait for the auto-poll idle bit. */
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
1583
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read one 16-bit register from the PHY at @phy_id. Link auto-polling is
 * suspended for the duration because it shares the MII command registers.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* The chip clears the trigger bit when the transaction completes. */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}
1604
/*
 * Write one 16-bit value to PHY register @regnum at @phy_id, mirroring
 * mdio_read(): suspend link auto-polling, issue the command, wait for the
 * trigger bit to self-clear, then re-enable polling.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}
1621
/*
 * Block further scheduling of the driver's deferred work and wait for any
 * in-flight instances to finish. The flag must be cleared under task_lock
 * before the cancels so a concurrently-running task sees it and bails out
 * rather than rearming itself.
 */
static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	/* Waits for a running work item to complete; must not hold task_lock
	 * here, since the work items themselves take it. */
	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}
1631
/* Re-allow the deferred tasks to run (counterpart of rhine_task_disable). */
static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}
1638
/*
 * ndo_open handler: acquire the (shared) IRQ, allocate descriptor rings
 * and buffers, reset and program the chip, then start the Tx queue.
 * Returns 0 on success or a negative errno; on ring-allocation failure
 * the IRQ is released again so the device is left fully closed.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	/* Reset first, then enable deferred tasks, then program registers —
	 * init_registers() unmasks interrupts, so everything else must be
	 * ready before it runs. */
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1670
/*
 * Deferred full reset, scheduled from rhine_tx_timeout(). Quiesces NAPI
 * and the Tx queue, throws away and reallocates both descriptor rings,
 * then reinitializes the hardware. Guarded by task_lock/task_enable so it
 * cannot race device teardown (see rhine_task_disable()).
 */
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}
1705
/*
 * ndo_tx_timeout handler: log the chip and PHY status for diagnosis and
 * defer the actual recovery to rhine_reset_task(), since a full reset
 * sleeps and cannot run in this (atomic watchdog) context.
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}
1717
/*
 * ndo_start_xmit handler: place one skb into the next Tx descriptor slot.
 * Rhine-I cannot DMA from unaligned/fragmented/checksum-offloaded skbs,
 * so those are bounced through a pre-allocated aligned buffer. Always
 * returns NETDEV_TX_OK — undeliverable packets are dropped and counted,
 * never requeued.
 */
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* Hardware requires minimum Ethernet frame length; skb_padto frees
	 * the skb itself on failure. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* 0 marks "no per-skb mapping" for rhine_tx()'s unmap. */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 vid_pcp = vlan_tx_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	/* lock eth irq */
	/* Barriers ensure the descriptor body is visible to the device
	 * before ownership is handed over, and DescOwn before the doorbell. */
	wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (vlan_tx_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	/* Stop the queue when the ring is full; rhine_tx() reopens it. */
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}
1812
/*
 * Mask all chip interrupt sources. mmiowb() orders the MMIO write with
 * respect to subsequent unlocks/CPU hand-offs on weakly-ordered platforms.
 */
static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}
1818
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/*
 * Top-half IRQ handler (may be shared): on any event of interest it masks
 * chip interrupts and schedules NAPI, which does the real work and
 * re-enables interrupts when done. Returns IRQ_NONE if the chip reports
 * nothing, letting the kernel try other handlers on the shared line.
 */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	/* Anything outside the known event set is unexpected — log it. */
	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}
1846
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx to cur_tx, account
 * errors/collisions/byte counts, unmap DMA buffers, free skbs, and reopen
 * the Tx queue once enough slots are free. Stops at the first descriptor
 * the hardware still owns.
 */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* FIFO underflow/abort: give the descriptor
				 * back to the chip and retry this skb. */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Collision count field differs between chip families. */
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		/* dma == 0 means the Rhine-I bounce buffer was used — nothing
		 * to unmap in that case. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 DMA_TO_DEVICE);
		}
		dev_consume_skb_any(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}
1908
/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	/* Round up to the 4-byte boundary after the CRC, then skip the
	 * 2-byte TPID to land on the big-endian TCI. */
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}
1923
/* Process up to limit frames from receive ring */
/*
 * NAPI Rx worker: harvest up to @limit completed Rx descriptors, deliver
 * good frames (copying small ones per rx_copybreak to keep the ring buffer
 * in place), account errors, then refill any emptied ring slots. Returns
 * the number of descriptors examined for the NAPI budget.
 */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
	"Oversized Ethernet frame spanned multiple buffers, "
	"entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				/* Copy path: sync the DMA buffer to the CPU,
				 * copy out, then give it back to the device
				 * so the ring slot can be reused as-is. */
				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				/* Pass-through path: hand the ring skb up and
				 * leave the slot empty for the refill loop. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			rp->rx_skbuff_dma[entry] =
				dma_map_single(hwdev, skb->data,
					       rp->rx_buf_sz,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(hwdev,
					      rp->rx_skbuff_dma[entry])) {
				dev_kfree_skb(skb);
				rp->rx_skbuff_dma[entry] = 0;
				break;
			}
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		/* Hand the descriptor back to the hardware last. */
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
2061
/*
 * Restart the transmitter after a Tx error: point the chip at the first
 * unreclaimed descriptor and re-issue TxOn/TxDemand. If new errors are
 * already pending we do nothing — the ISR will come back here after they
 * are sorted out.
 */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
		       ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
		       ioaddr + ChipCmd);

		/* 0x020000 = tagging requested in rhine_start_tx(). */
		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		       ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}

}
2098
/*
 * Workqueue handler for infrequent ("slow") chip events: link changes and
 * PCI errors. Acknowledges the events, acts on them, and re-enables the
 * full interrupt mask. Skipped entirely once rhine_task_disable() has
 * cleared task_enable (device going down).
 */
static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	/* Re-arm the interrupt mask that the ISR/NAPI path narrowed. */
	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}
2125
/*
 * ndo_get_stats64 handler: fold the hardware CRC/missed counters into
 * dev->stats under the lock, then copy out the 64-bit packet/byte counters
 * using the u64_stats seqcount retry loop so readers on 32-bit systems
 * never see torn values.
 */
static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));

	return stats;
}
2152
/*
 * ndo_set_rx_mode handler: program the Rx filter according to the device
 * flags — promiscuous, all-multicast, per-address CAM (management chips),
 * or the classic 64-bit multicast hash — and, on management chips, toggle
 * VLAN receive filtering to match promiscuous mode.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		/* Management chips have 32 CAM slots for exact matches. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Classic 64-bit hash filter indexed by CRC high bits. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2200
2201static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2202{
Alexey Charkovf7630d12014-04-22 19:28:08 +04002203 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Rick Jones23020ab2011-11-09 09:58:07 +00002205 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2206 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Alexey Charkovf7630d12014-04-22 19:28:08 +04002207 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
2209
2210static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2211{
2212 struct rhine_private *rp = netdev_priv(dev);
2213 int rc;
2214
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002215 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 rc = mii_ethtool_gset(&rp->mii_if, cmd);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002217 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
2219 return rc;
2220}
2221
2222static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2223{
2224 struct rhine_private *rp = netdev_priv(dev);
2225 int rc;
2226
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002227 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 rc = mii_ethtool_sset(&rp->mii_if, cmd);
Roger Luethi00b428c2006-03-28 20:53:56 +02002229 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002230 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232 return rc;
2233}
2234
2235static int netdev_nway_reset(struct net_device *dev)
2236{
2237 struct rhine_private *rp = netdev_priv(dev);
2238
2239 return mii_nway_restart(&rp->mii_if);
2240}
2241
2242static u32 netdev_get_link(struct net_device *dev)
2243{
2244 struct rhine_private *rp = netdev_priv(dev);
2245
2246 return mii_link_ok(&rp->mii_if);
2247}
2248
2249static u32 netdev_get_msglevel(struct net_device *dev)
2250{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002251 struct rhine_private *rp = netdev_priv(dev);
2252
2253 return rp->msg_enable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254}
2255
2256static void netdev_set_msglevel(struct net_device *dev, u32 value)
2257{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002258 struct rhine_private *rp = netdev_priv(dev);
2259
2260 rp->msg_enable = value;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261}
2262
2263static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2264{
2265 struct rhine_private *rp = netdev_priv(dev);
2266
2267 if (!(rp->quirks & rqWOL))
2268 return;
2269
2270 spin_lock_irq(&rp->lock);
2271 wol->supported = WAKE_PHY | WAKE_MAGIC |
2272 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2273 wol->wolopts = rp->wolopts;
2274 spin_unlock_irq(&rp->lock);
2275}
2276
2277static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2278{
2279 struct rhine_private *rp = netdev_priv(dev);
2280 u32 support = WAKE_PHY | WAKE_MAGIC |
2281 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2282
2283 if (!(rp->quirks & rqWOL))
2284 return -EINVAL;
2285
2286 if (wol->wolopts & ~support)
2287 return -EINVAL;
2288
2289 spin_lock_irq(&rp->lock);
2290 rp->wolopts = wol->wolopts;
2291 spin_unlock_irq(&rp->lock);
2292
2293 return 0;
2294}
2295
/* ethtool entry points; mostly thin wrappers around the MII library. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};
2307
2308static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2309{
2310 struct rhine_private *rp = netdev_priv(dev);
2311 int rc;
2312
2313 if (!netif_running(dev))
2314 return -EINVAL;
2315
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002316 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
Roger Luethi00b428c2006-03-28 20:53:56 +02002318 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002319 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
2321 return rc;
2322}
2323
/*
 * ndo_stop: quiesce software paths first (slow work, NAPI, tx queue),
 * then the hardware, then release IRQ and descriptor memory.  The
 * ordering below is deliberate; do not reorder.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Stop all software activity that could touch the chip. */
	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	/* Hardware is idle: safe to release the IRQ and DMA rings. */
	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2351
2352
/* PCI .remove: tear down in reverse order of the PCI probe path. */
static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	/* Unregister first so no new I/O can reach the hardware. */
	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}
2366
/* Platform .remove: mirror of rhine_remove_one_pci() for OF devices. */
static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	/* Unregister first so no new I/O can reach the hardware. */
	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}
2380
/*
 * Arm Wake-on-LAN at shutdown time and, on a real power-off, put the
 * chip into D3hot.  Called from the PCI .shutdown hook and from
 * rhine_suspend() for PCI devices.  The register write sequence is
 * order-sensitive; do not reorder.
 */
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	/*
	 * Only request D3hot on a genuine power-off, and never when
	 * avoid_D3 is set (some BIOSes fail PXE boot with the chip in D3).
	 */
	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2431
Francois Romieue92b9b32012-01-07 22:58:27 +01002432#ifdef CONFIG_PM_SLEEP
/*
 * PM .suspend: quiesce software activity (slow work, IRQ handling,
 * NAPI), detach from the network stack, and for PCI devices program
 * the WOL/D3 state.  No-op if the interface is down.
 */
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	/* WOL arming / D3 handling only applies to the PCI incarnation. */
	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}
2452
/*
 * PM .resume: reverse of rhine_suspend().  Re-enables MMIO where
 * configured, reinitializes chip power state, rebuilds the descriptor
 * rings from scratch, reprograms the chip registers and reattaches to
 * the network stack.  No-op if the interface is down.
 */
static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Drop any pre-suspend buffers and allocate fresh rings. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}
Francois Romieue92b9b32012-01-07 22:58:27 +01002478
2479static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2480#define RHINE_PM_OPS (&rhine_pm_ops)
2481
2482#else
2483
2484#define RHINE_PM_OPS NULL
2485
2486#endif /* !CONFIG_PM_SLEEP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
/* PCI bus glue for Rhine chips found as PCI devices. */
static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};
2496
/* Platform/OF bus glue for Rhine cores integrated in SoCs. */
static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};
2507
/*
 * Boards whose BIOS misbehaves (fails PXE boot) if the chip is left in
 * D3; a DMI match forces avoid_D3 in rhine_init().
 */
static struct dmi_system_id rhine_dmi_table[] __initdata = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }	/* terminating entry */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
2526static int __init rhine_init(void)
2527{
Alexey Charkov2d283862014-04-22 19:28:09 +04002528 int ret_pci, ret_platform;
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530/* when a module, this is printed whether or not devices are found in probe */
2531#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002532 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533#endif
Roger Luethie84df482007-03-06 19:57:37 +01002534 if (dmi_check_system(rhine_dmi_table)) {
2535 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002536 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002537 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002538 }
2539 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002540 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002541
Alexey Charkov2d283862014-04-22 19:28:09 +04002542 ret_pci = pci_register_driver(&rhine_driver_pci);
2543 ret_platform = platform_driver_register(&rhine_driver_platform);
2544 if ((ret_pci < 0) && (ret_platform < 0))
2545 return ret_pci;
2546
2547 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548}
2549
2550
/* Module exit: unregister both bus flavors (reverse of rhine_init()). */
static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}
2556
2557
/* Module entry/exit hooks. */
module_init(rhine_init);
module_exit(rhine_cleanup);