blob: 91661e0cbe3baaeb7b949936d251f40f57e850cd [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi207070f2013-09-21 14:24:11 +020035#define DRV_VERSION "1.5.1"
Roger Luethi38f49e82010-12-06 00:59:40 +000036#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
Francois Romieufc3e0f82012-01-07 22:39:37 +010042static int debug = 0;
43#define RHINE_MSG_DEFAULT \
44 (0x0000)
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000048#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070051static int rx_copybreak = 1518;
52#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070054#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Roger Luethib933b4d2006-08-14 23:00:21 -070056/* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000058static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/*
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
63 */
64
65/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67static const int multicast_filter_limit = 32;
68
69
70/* Operational parameters that are set at compile time. */
71
72/* Keep the ring sizes a power of two for compile efficiency.
Tino Reichardt92bf2002015-02-24 10:28:01 -080073 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 * Making the Tx ring too large decreases the effectiveness of channel
75 * bonding and packet priority.
76 * With BQL support, we can increase TX ring safely.
77 * There are no ill effects from too-large receive rings.
78 */
79#define TX_RING_SIZE 64
80#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070081#define RX_RING_SIZE 64
Linus Torvalds1da177e2005-04-16 15:20:36 -070082
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87
88#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89
90#include <linux/module.h>
91#include <linux/moduleparam.h>
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/timer.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070097#include <linux/interrupt.h>
98#include <linux/pci.h>
Alexey Charkov2d283862014-04-22 19:28:09 +040099#include <linux/of_address.h>
100#include <linux/of_device.h>
101#include <linux/of_irq.h>
102#include <linux/platform_device.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -0400103#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/skbuff.h>
107#include <linux/init.h>
108#include <linux/delay.h>
109#include <linux/mii.h>
110#include <linux/ethtool.h>
111#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000112#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800114#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <asm/processor.h> /* Processor type for cache alignment. */
116#include <asm/io.h>
117#include <asm/irq.h>
118#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100119#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
121/* These identify the driver base version and may not be removed. */
Bill Pemberton76e239e2012-12-03 09:23:48 -0500122static const char version[] =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000123 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
126MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
127MODULE_LICENSE("GPL");
128
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129module_param(debug, int, 0);
130module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700131module_param(avoid_D3, bool, 0);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100132MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700134MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135
Roger Luethi38f49e82010-12-06 00:59:40 +0000136#define MCAM_SIZE 32
137#define VCAM_SIZE 32
138
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139/*
140 Theory of Operation
141
142I. Board Compatibility
143
144This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
145controller.
146
147II. Board-specific settings
148
149Boards with this chip are functional only in a bus-master PCI slot.
150
151Many operational settings are loaded from the EEPROM to the Config word at
152offset 0x78. For most of these settings, this driver assumes that they are
153correct.
154If this driver is compiled to use PCI memory space operations the EEPROM
155must be configured to enable memory ops.
156
157III. Driver operation
158
159IIIa. Ring buffers
160
161This driver uses two statically allocated fixed-size descriptor lists
162formed into rings by a branch from the final descriptor to the beginning of
163the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
164
165IIIb/c. Transmit/Receive Structure
166
167This driver attempts to use a zero-copy receive and transmit scheme.
168
169Alas, all data buffers are required to start on a 32 bit boundary, so
170the driver must often copy transmit packets into bounce buffers.
171
172The driver allocates full frame size skbuffs for the Rx ring buffers at
173open() time and passes the skb->data field to the chip as receive data
174buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
175a fresh skbuff is allocated and the frame is copied to the new skbuff.
176When the incoming frame is larger, the skbuff is passed directly up the
177protocol stack. Buffers consumed this way are replaced by newly allocated
178skbuffs in the last phase of rhine_rx().
179
180The RX_COPYBREAK value is chosen to trade-off the memory wasted by
181using a full-sized skbuff for small frames vs. the copying costs of larger
182frames. New boards are typically used in generously configured machines
183and the underfilled buffers have negligible impact compared to the benefit of
184a single allocation size, so the default value of zero results in never
185copying packets. When copying is done, the cost is usually mitigated by using
186a combined copy/checksum routine. Copying also preloads the cache, which is
187most useful with small frames.
188
189Since the VIA chips are only able to transfer data to buffers on 32 bit
190boundaries, the IP header at offset 14 in an ethernet frame isn't
191longword aligned for further processing. Copying these unaligned buffers
192has the beneficial effect of 16-byte aligning the IP header.
193
194IIId. Synchronization
195
196The driver runs as two independent, single-threaded flows of control. One
197is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800198netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
199which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200
201The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800202netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
203the ring is not available it stops the transmit queue by
204calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206The interrupt handler has exclusive control over the Rx ring and records stats
207from the Tx ring. After reaping the stats, it marks the Tx queue entry as
208empty by incrementing the dirty_tx mark. If at least half of the entries in
209the Rx ring are available the transmit queue is woken up if it was stopped.
210
211IV. Notes
212
213IVb. References
214
215Preliminary VT86C100A manual from http://www.via.com.tw/
216http://www.scyld.com/expert/100mbps.html
217http://www.scyld.com/expert/NWay.html
218ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
220
221
222IVc. Errata
223
224The VT86C100A manual is not reliable information.
225The 3043 chip does not handle unaligned transmit or receive buffers, resulting
226in significant performance degradation for bounce buffer copies on transmit
227and unaligned IP headers on receive.
228The chip does not pad to minimum transmit length.
229
230*/
231
232
233/* This table drives the PCI probe routines. It's mostly boilerplate in all
234 of the drivers, and will likely be provided by some future kernel.
235 Note the matching code -- the first table entry matchs all 56** cards but
236 second only the 1234 card.
237*/
238
239enum rhine_revs {
240 VT86C100A = 0x00,
241 VTunknown0 = 0x20,
242 VT6102 = 0x40,
243 VT8231 = 0x50, /* Integrated MAC */
244 VT8233 = 0x60, /* Integrated MAC */
245 VT8235 = 0x74, /* Integrated MAC */
246 VT8237 = 0x78, /* Integrated MAC */
247 VTunknown1 = 0x7C,
248 VT6105 = 0x80,
249 VT6105_B0 = 0x83,
250 VT6105L = 0x8A,
251 VT6107 = 0x8C,
252 VTunknown2 = 0x8E,
253 VT6105M = 0x90, /* Management adapter */
254};
255
/* Per-revision feature/work-around flags OR-ed into rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
269/*
270 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
271 * MMIO as well as for the collision counter and the Tx FIFO underflow
272 * indicator. In addition, Tx and Rx buffers need to 4 byte aligned.
273 */
274
275/* Beware of PCI posted writes */
276#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
277
Benoit Taine9baa3c32014-08-08 15:56:03 +0200278static const struct pci_device_id rhine_pci_tbl[] = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400279 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
280 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
281 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
282 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 { } /* terminate list */
284};
285MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
286
Alexey Charkov2d283862014-04-22 19:28:09 +0400287/* OpenFirmware identifiers for platform-bus devices
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400288 * The .data field is currently only used to store quirks
Alexey Charkov2d283862014-04-22 19:28:09 +0400289 */
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400290static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
Fabian Frederickd2b75a32015-03-17 19:40:27 +0100291static const struct of_device_id rhine_of_tbl[] = {
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400292 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
Alexey Charkov2d283862014-04-22 19:28:09 +0400293 { } /* terminate list */
294};
295MODULE_DEVICE_TABLE(of, rhine_of_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
314
315/* Bits in ConfigD */
316enum backoff_bits {
317 BackOptional=0x01, BackModify=0x02,
318 BackCaptureEffect=0x04, BackRandom=0x08
319};
320
Roger Luethi38f49e82010-12-06 00:59:40 +0000321/* Bits in the TxConfig (TCR) register */
322enum tcr_bits {
323 TCR_PQEN=0x01,
324 TCR_LB0=0x02, /* loopback[0] */
325 TCR_LB1=0x04, /* loopback[1] */
326 TCR_OFSET=0x08,
327 TCR_RTGOPT=0x10,
328 TCR_RTFT0=0x20,
329 TCR_RTFT1=0x40,
330 TCR_RTSF=0x80,
331};
332
333/* Bits in the CamCon (CAMC) register */
334enum camcon_bits {
335 CAMC_CAMEN=0x01,
336 CAMC_VCAMSL=0x02,
337 CAMC_CAMWR=0x04,
338 CAMC_CAMRD=0x08,
339};
340
341/* Bits in the PCIBusConfig1 (BCR1) register */
342enum bcr1_bits {
343 BCR1_POT0=0x01,
344 BCR1_POT1=0x02,
345 BCR1_POT2=0x04,
346 BCR1_CTFT0=0x08,
347 BCR1_CTFT1=0x10,
348 BCR1_CTSF=0x20,
349 BCR1_TXQNOBK=0x40, /* for VT6105 */
350 BCR1_VIDFR=0x80, /* for VT6105 */
351 BCR1_MED0=0x40, /* for VT6102 */
352 BCR1_MED1=0x80, /* for VT6102 */
353};
354
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355/* Registers we check that mmio and reg are the same. */
356static const int mmio_verify_registers[] = {
357 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
358 0
359};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};
383
384/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
385enum wol_bits {
386 WOLucast = 0x10,
387 WOLmagic = 0x20,
388 WOLbmcast = 0x30,
389 WOLlnkon = 0x40,
390 WOLlnkoff = 0x80,
391};
392
393/* The Rx and Tx buffer descriptors. */
394struct rx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400395 __le32 rx_status;
396 __le32 desc_length; /* Chain flag, Buffer/frame length */
397 __le32 addr;
398 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399};
400struct tx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400401 __le32 tx_status;
402 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
403 __le32 addr;
404 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405};
406
407/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
408#define TXDESC 0x00e08000
409
410enum rx_status_bits {
411 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
412};
413
414/* Bits in *_desc.*_status */
415enum desc_status_bits {
416 DescOwn=0x80000000
417};
418
Roger Luethi38f49e82010-12-06 00:59:40 +0000419/* Bits in *_desc.*_length */
420enum desc_length_bits {
421 DescTag=0x00010000
422};
423
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424/* Bits in ChipCmd. */
425enum chip_cmd_bits {
426 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
427 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
428 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
429 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
430};
431
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000432struct rhine_stats {
433 u64 packets;
434 u64 bytes;
435 struct u64_stats_sync syncp;
436};
437
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438struct rhine_private {
Roger Luethi38f49e82010-12-06 00:59:40 +0000439 /* Bit mask for configured VLAN ids */
440 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
441
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442 /* Descriptor rings */
443 struct rx_desc *rx_ring;
444 struct tx_desc *tx_ring;
445 dma_addr_t rx_ring_dma;
446 dma_addr_t tx_ring_dma;
447
448 /* The addresses of receive-in-place skbuffs. */
449 struct sk_buff *rx_skbuff[RX_RING_SIZE];
450 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
451
452 /* The saved address of a sent-in-place packet/buffer, for later free(). */
453 struct sk_buff *tx_skbuff[TX_RING_SIZE];
454 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
455
Roger Luethi4be5de22006-04-04 20:49:16 +0200456 /* Tx bounce buffers (Rhine-I only) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457 unsigned char *tx_buf[TX_RING_SIZE];
458 unsigned char *tx_bufs;
459 dma_addr_t tx_bufs_dma;
460
Alexey Charkovf7630d12014-04-22 19:28:08 +0400461 int irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462 long pioaddr;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700463 struct net_device *dev;
464 struct napi_struct napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 spinlock_t lock;
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100466 struct mutex task_lock;
467 bool task_enable;
468 struct work_struct slow_event_task;
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800469 struct work_struct reset_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470
Francois Romieufc3e0f82012-01-07 22:39:37 +0100471 u32 msg_enable;
472
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 /* Frequently used values: keep some adjacent for cache effect. */
474 u32 quirks;
475 struct rx_desc *rx_head_desc;
476 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
477 unsigned int cur_tx, dirty_tx;
478 unsigned int rx_buf_sz; /* Based on MTU+slack. */
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000479 struct rhine_stats rx_stats;
480 struct rhine_stats tx_stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 u8 wolopts;
482
483 u8 tx_thresh, rx_thresh;
484
485 struct mii_if_info mii_if;
486 void __iomem *base;
487};
488
Roger Luethi38f49e82010-12-06 00:59:40 +0000489#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
490#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
491#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
492
493#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
494#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
495#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
496
497#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
498#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
499#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
500
501#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
502#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
503#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
504
505
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506static int mdio_read(struct net_device *dev, int phy_id, int location);
507static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
508static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800509static void rhine_reset_task(struct work_struct *work);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100510static void rhine_slow_event_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000512static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
513 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100514static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700516static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517static void rhine_set_rx_mode(struct net_device *dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000518static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
519 struct rtnl_link_stats64 *stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400521static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522static int rhine_close(struct net_device *dev);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000523static int rhine_vlan_rx_add_vid(struct net_device *dev,
524 __be16 proto, u16 vid);
525static int rhine_vlan_rx_kill_vid(struct net_device *dev,
526 __be16 proto, u16 vid);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100527static void rhine_restart_tx(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000529static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
Francois Romieua384a332012-01-07 22:19:36 +0100530{
531 void __iomem *ioaddr = rp->base;
532 int i;
533
534 for (i = 0; i < 1024; i++) {
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000535 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
536
537 if (low ^ has_mask_bits)
Francois Romieua384a332012-01-07 22:19:36 +0100538 break;
539 udelay(10);
540 }
541 if (i > 64) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100542 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000543 "count: %04d\n", low ? "low" : "high", reg, mask, i);
Francois Romieua384a332012-01-07 22:19:36 +0100544 }
545}
546
547static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
548{
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000549 rhine_wait_bit(rp, reg, mask, false);
Francois Romieua384a332012-01-07 22:19:36 +0100550}
551
552static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
553{
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000554 rhine_wait_bit(rp, reg, mask, true);
Francois Romieua384a332012-01-07 22:19:36 +0100555}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556
Francois Romieua20a28b2011-12-30 14:53:58 +0100557static u32 rhine_get_events(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 void __iomem *ioaddr = rp->base;
560 u32 intr_status;
561
562 intr_status = ioread16(ioaddr + IntrStatus);
563 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
564 if (rp->quirks & rqStatusWBRace)
565 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
566 return intr_status;
567}
568
Francois Romieua20a28b2011-12-30 14:53:58 +0100569static void rhine_ack_events(struct rhine_private *rp, u32 mask)
570{
571 void __iomem *ioaddr = rp->base;
572
573 if (rp->quirks & rqStatusWBRace)
574 iowrite8(mask >> 16, ioaddr + IntrStatus2);
575 iowrite16(mask, ioaddr + IntrStatus);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100576 mmiowb();
Francois Romieua20a28b2011-12-30 14:53:58 +0100577}
578
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579/*
580 * Get power related registers into sane state.
581 * Notify user about past WOL event.
582 */
583static void rhine_power_init(struct net_device *dev)
584{
585 struct rhine_private *rp = netdev_priv(dev);
586 void __iomem *ioaddr = rp->base;
587 u16 wolstat;
588
589 if (rp->quirks & rqWOL) {
590 /* Make sure chip is in power state D0 */
591 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
592
593 /* Disable "force PME-enable" */
594 iowrite8(0x80, ioaddr + WOLcgClr);
595
596 /* Clear power-event config bits (WOL) */
597 iowrite8(0xFF, ioaddr + WOLcrClr);
598 /* More recent cards can manage two additional patterns */
599 if (rp->quirks & rq6patterns)
600 iowrite8(0x03, ioaddr + WOLcrClr1);
601
602 /* Save power-event status bits */
603 wolstat = ioread8(ioaddr + PwrcsrSet);
604 if (rp->quirks & rq6patterns)
605 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
606
607 /* Clear power-event status bits */
608 iowrite8(0xFF, ioaddr + PwrcsrClr);
609 if (rp->quirks & rq6patterns)
610 iowrite8(0x03, ioaddr + PwrcsrClr1);
611
612 if (wolstat) {
613 char *reason;
614 switch (wolstat) {
615 case WOLmagic:
616 reason = "Magic packet";
617 break;
618 case WOLlnkon:
619 reason = "Link went up";
620 break;
621 case WOLlnkoff:
622 reason = "Link went down";
623 break;
624 case WOLucast:
625 reason = "Unicast packet";
626 break;
627 case WOLbmcast:
628 reason = "Multicast/broadcast packet";
629 break;
630 default:
631 reason = "Unknown";
632 }
Joe Perchesdf4511f2011-04-16 14:15:25 +0000633 netdev_info(dev, "Woke system up. Reason: %s\n",
634 reason);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 }
636 }
637}
638
639static void rhine_chip_reset(struct net_device *dev)
640{
641 struct rhine_private *rp = netdev_priv(dev);
642 void __iomem *ioaddr = rp->base;
Francois Romieufc3e0f82012-01-07 22:39:37 +0100643 u8 cmd1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644
645 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
646 IOSYNC;
647
648 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000649 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650
651 /* Force reset */
652 if (rp->quirks & rqForceReset)
653 iowrite8(0x40, ioaddr + MiscCmd);
654
655 /* Reset can take somewhat longer (rare) */
Francois Romieua384a332012-01-07 22:19:36 +0100656 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 }
658
Francois Romieufc3e0f82012-01-07 22:39:37 +0100659 cmd1 = ioread8(ioaddr + ChipCmd1);
660 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
661 "failed" : "succeeded");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662}
663
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664static void enable_mmio(long pioaddr, u32 quirks)
665{
666 int n;
Alexey Charkov5b579e22014-05-03 16:40:53 +0400667
668 if (quirks & rqNeedEnMMIO) {
669 if (quirks & rqRhineI) {
670 /* More recent docs say that this bit is reserved */
671 n = inb(pioaddr + ConfigA) | 0x20;
672 outb(n, pioaddr + ConfigA);
673 } else {
674 n = inb(pioaddr + ConfigD) | 0x80;
675 outb(n, pioaddr + ConfigD);
676 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 }
678}
Alexey Charkov5b579e22014-05-03 16:40:53 +0400679
680static inline int verify_mmio(struct device *hwdev,
681 long pioaddr,
682 void __iomem *ioaddr,
683 u32 quirks)
684{
685 if (quirks & rqNeedEnMMIO) {
686 int i = 0;
687
688 /* Check that selected MMIO registers match the PIO ones */
689 while (mmio_verify_registers[i]) {
690 int reg = mmio_verify_registers[i++];
691 unsigned char a = inb(pioaddr+reg);
692 unsigned char b = readb(ioaddr+reg);
693
694 if (a != b) {
695 dev_err(hwdev,
696 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
697 reg, a, b);
698 return -EIO;
699 }
700 }
701 }
702 return 0;
703}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704
705/*
706 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
707 * (plus 0x6C for Rhine-I/II)
708 */
Bill Pemberton76e239e2012-12-03 09:23:48 -0500709static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710{
711 struct rhine_private *rp = netdev_priv(dev);
712 void __iomem *ioaddr = rp->base;
Francois Romieua384a332012-01-07 22:19:36 +0100713 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714
715 outb(0x20, pioaddr + MACRegEEcsr);
Francois Romieua384a332012-01-07 22:19:36 +0100716 for (i = 0; i < 1024; i++) {
717 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
718 break;
719 }
720 if (i > 512)
721 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 /*
724 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
725 * MMIO. If reloading EEPROM was done first this could be avoided, but
726 * it is not known if that still works with the "win98-reboot" problem.
727 */
728 enable_mmio(pioaddr, rp->quirks);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729
730 /* Turn off EEPROM-controlled wake-up (magic packet) */
731 if (rp->quirks & rqWOL)
732 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
733
734}
735
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the device IRQ masked. */
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
747
Francois Romieu269f3112011-12-30 14:43:54 +0100748static void rhine_kick_tx_threshold(struct rhine_private *rp)
749{
750 if (rp->tx_thresh < 0xe0) {
751 void __iomem *ioaddr = rp->base;
752
753 rp->tx_thresh += 0x20;
754 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
755 }
756}
757
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100758static void rhine_tx_err(struct rhine_private *rp, u32 status)
759{
760 struct net_device *dev = rp->dev;
761
762 if (status & IntrTxAborted) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100763 netif_info(rp, tx_err, dev,
764 "Abort %08x, frame dropped\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100765 }
766
767 if (status & IntrTxUnderrun) {
768 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100769 netif_info(rp, tx_err ,dev, "Transmitter underrun, "
770 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100771 }
772
Francois Romieufc3e0f82012-01-07 22:39:37 +0100773 if (status & IntrTxDescRace)
774 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100775
776 if ((status & IntrTxError) &&
777 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
778 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100779 netif_info(rp, tx_err, dev, "Unspecified error. "
780 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100781 }
782
783 rhine_restart_tx(dev);
784}
785
/*
 * Fold the chip's CRC-error and missed-frame tally counters into the
 * net_device statistics and clear the hardware counters.
 * Called under rp->lock (see rhine_napipoll()).
 */
static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors	+= ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors	+= ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
804
805#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
806 IntrRxErr | \
807 IntrRxEmpty | \
808 IntrRxOverflow | \
809 IntrRxDropped | \
810 IntrRxNoBuf | \
811 IntrRxWakeUp)
812
813#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
814 IntrTxAborted | \
815 IntrTxUnderrun | \
816 IntrTxDescRace)
817#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
818
819#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
820 RHINE_EVENT_NAPI_TX | \
821 IntrStatsMax)
822#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
823#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
824
/*
 * NAPI poll handler: acknowledge and service Rx/Tx events up to @budget,
 * defer slow events (PCI error, link change) to slow_event_task, and
 * re-enable interrupts only when the budget was not exhausted.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	/* Slow events stay pending; the work item acknowledges them. */
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		/* Error handling after reclaim: rhine_tx_err() restarts Tx. */
		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	/* Tally counters overflowed; fold them into stats under the lock. */
	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		/* Keep slow events masked until the work item handled them. */
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete(napi);
		iowrite16(enable_mask, ioaddr + IntrEnable);
		mmiowb();
	}
	return work_done;
}
Roger Luethi633949a2006-08-14 23:00:17 -0700872
/*
 * Bring the chip to a known state: soft reset, then (PCI devices only)
 * reload the EEPROM-controlled configuration bytes the reset cleared.
 */
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}
888
/*
 * net_device_ops shared by all Rhine variants; rhine_poll is only wired
 * up when CONFIG_NET_POLL_CONTROLLER is set.
 */
static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address 	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout 	 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
906
/*
 * Bus-independent half of device probe, shared by the PCI and platform
 * paths: allocate and populate the net_device, read the MAC address from
 * the chip, set up MII, register the netdev and log the result.
 *
 * @hwdev: the underlying PCI or platform device
 * @quirks: rq* capability/bug flags for this chip revision
 * @pioaddr/@ioaddr: I/O-port base and mapped register base
 * @irq: interrupt line to use
 * Returns 0 on success or a negative errno.
 */
static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	/* Internal PHY (Rhine-III) is always at address 1. */
	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* MAC address as loaded from EEPROM into the station registers. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* Hardware VLAN offload exists on the management chip (6105M). */
	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name, (long)ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		/* Un-isolate the PHY so it is usable on the MII bus. */
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
1038
/*
 * PCI probe: derive the rq* quirk flags from the chip revision, claim and
 * map the BARs (I/O or MMIO depending on CONFIG_VIA_RHINE_MMIO), verify
 * the mapping, then hand off to rhine_init_one_common(). Errors unwind
 * through the goto ladder in reverse order of acquisition.
 */
static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	/* Rhine-I exposes a 128-byte register window, later chips 256. */
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;

/* This driver was written to use PCI memory space. Some early versions
 * of the Rhine may only work correctly with I/O space accesses.
 * TODO: determine for which revisions this is true and assign the flag
 * in code as opposed to this Kconfig option (???)
 */
#ifdef CONFIG_VIA_RHINE_MMIO
	u32 quirks = rqNeedEnMMIO;
#else
	u32 quirks = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* Map PCI revision to chip generation and its quirk flags. */
	if (pdev->revision < VTunknown0) {
		quirks |= rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks |= rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);	/* BAR 0: I/O ports */
	memaddr = pci_resource_start(pdev, 1);	/* BAR 1: memory space */

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	/* Map BAR 1 (MMIO) or BAR 0 (PIO) depending on the build option. */
	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

	enable_mmio(pioaddr, quirks);

	/* Check that registers read the same through PIO and MMIO. */
	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
	if (rc)
		goto err_out_unmap;

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
1129
Alexey Charkov2d283862014-04-22 19:28:09 +04001130static int rhine_init_one_platform(struct platform_device *pdev)
1131{
1132 const struct of_device_id *match;
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001133 const u32 *quirks;
Alexey Charkov2d283862014-04-22 19:28:09 +04001134 int irq;
1135 struct resource *res;
1136 void __iomem *ioaddr;
1137
1138 match = of_match_device(rhine_of_tbl, &pdev->dev);
1139 if (!match)
1140 return -EINVAL;
1141
1142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1143 ioaddr = devm_ioremap_resource(&pdev->dev, res);
1144 if (IS_ERR(ioaddr))
1145 return PTR_ERR(ioaddr);
1146
1147 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1148 if (!irq)
1149 return -EINVAL;
1150
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001151 quirks = match->data;
1152 if (!quirks)
Alexey Charkov2d283862014-04-22 19:28:09 +04001153 return -EINVAL;
1154
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001155 return rhine_init_one_common(&pdev->dev, *quirks,
Alexey Charkov2d283862014-04-22 19:28:09 +04001156 (long)ioaddr, ioaddr, irq);
1157}
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159static int alloc_ring(struct net_device* dev)
1160{
1161 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001162 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 void *ring;
1164 dma_addr_t ring_dma;
1165
Alexey Charkovf7630d12014-04-22 19:28:08 +04001166 ring = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001167 RX_RING_SIZE * sizeof(struct rx_desc) +
1168 TX_RING_SIZE * sizeof(struct tx_desc),
1169 &ring_dma,
1170 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 if (!ring) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001172 netdev_err(dev, "Could not allocate DMA memory\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 return -ENOMEM;
1174 }
1175 if (rp->quirks & rqRhineI) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001176 rp->tx_bufs = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001177 PKT_BUF_SZ * TX_RING_SIZE,
1178 &rp->tx_bufs_dma,
1179 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 if (rp->tx_bufs == NULL) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001181 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001182 RX_RING_SIZE * sizeof(struct rx_desc) +
1183 TX_RING_SIZE * sizeof(struct tx_desc),
1184 ring, ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 return -ENOMEM;
1186 }
1187 }
1188
1189 rp->rx_ring = ring;
1190 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1191 rp->rx_ring_dma = ring_dma;
1192 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1193
1194 return 0;
1195}
1196
/*
 * Release the coherent Rx+Tx descriptor ring allocated by alloc_ring()
 * and, if present, the Rhine-I Tx bounce buffers.
 */
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;

	/* Rx and Tx rings live in one allocation anchored at rx_ring. */
	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}
1215
/* An Rx skb paired with its streaming DMA mapping. */
struct rhine_skb_dma {
	struct sk_buff *skb;
	dma_addr_t dma;
};
1220
1221static inline int rhine_skb_dma_init(struct net_device *dev,
1222 struct rhine_skb_dma *sd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223{
1224 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001225 struct device *hwdev = dev->dev.parent;
françois romieua21bb8b2015-05-01 22:14:39 +02001226 const int size = rp->rx_buf_sz;
1227
1228 sd->skb = netdev_alloc_skb(dev, size);
1229 if (!sd->skb)
1230 return -ENOMEM;
1231
1232 sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1233 if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1234 netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1235 dev_kfree_skb_any(sd->skb);
1236 return -EIO;
1237 }
1238
1239 return 0;
1240}
1241
/*
 * Record a freshly mapped Rx buffer in the software arrays and publish its
 * DMA address in the descriptor ring.
 */
static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
					   struct rhine_skb_dma *sd, int entry)
{
	rp->rx_skbuff_dma[entry] = sd->dma;
	rp->rx_skbuff[entry] = sd->skb;

	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
	/* Ensure the address is visible before the caller grants the
	 * descriptor to the chip by setting DescOwn. */
	dma_wmb();
}
1251
/*
 * Initialize the Rx descriptor ring and populate it with mapped skbs.
 * Buffer size follows the MTU. DescOwn is only set on entries whose skb
 * allocation succeeded; dirty_rx tracks how many entries are unfilled.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int rc, i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct rhine_skb_dma sd;

		rc = rhine_skb_dma_init(dev, &sd);
		if (rc < 0)
			break;

		rhine_skb_dma_nic_store(rp, &sd, i);

		/* Hand the descriptor to the chip (after dma_wmb() above). */
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* Negative (as unsigned) when some entries could not be filled. */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
1289
1290static void free_rbufs(struct net_device* dev)
1291{
1292 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001293 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 int i;
1295
1296 /* Free all the skbuffs in the Rx queue. */
1297 for (i = 0; i < RX_RING_SIZE; i++) {
1298 rp->rx_ring[i].rx_status = 0;
1299 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1300 if (rp->rx_skbuff[i]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001301 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 rp->rx_skbuff_dma[i],
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001303 rp->rx_buf_sz, DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 dev_kfree_skb(rp->rx_skbuff[i]);
1305 }
1306 rp->rx_skbuff[i] = NULL;
1307 }
1308}
1309
/*
 * Initialize the Tx descriptor ring: clear status, chain the descriptors
 * into a circular list, and (Rhine-I) point each slot at its bounce buffer.
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		/* Rhine-I transmits from driver-owned bounce buffers. */
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Close the ring: last descriptor points back to the first. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

	netdev_reset_queue(dev);
}
1331
1332static void free_tbufs(struct net_device* dev)
1333{
1334 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001335 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 int i;
1337
1338 for (i = 0; i < TX_RING_SIZE; i++) {
1339 rp->tx_ring[i].tx_status = 0;
1340 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1341 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1342 if (rp->tx_skbuff[i]) {
1343 if (rp->tx_skbuff_dma[i]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001344 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 rp->tx_skbuff_dma[i],
1346 rp->tx_skbuff[i]->len,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001347 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 }
1349 dev_kfree_skb(rp->tx_skbuff[i]);
1350 }
1351 rp->tx_skbuff[i] = NULL;
1352 rp->tx_buf[i] = NULL;
1353 }
1354}
1355
/*
 * Refresh link state via MII (unless the media is forced) and program the
 * chip's full/half-duplex bit to match the negotiated mode.
 * @init_media: non-zero on first call after bring-up (passed to
 * mii_check_media() to force an initial report).
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!rp->mii_if.force_media)
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	/* Mirror the duplex result into the chip's command register. */
	if (rp->mii_if.full_duplex)
	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	else
	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		   ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}
1374
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}

	/* With autoneg on, rhine_check_media() re-evaluates the carrier. */
	rhine_check_media(dev, 0);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}
1392
/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	/* Put the CAM into access mode before programming it. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	/* Commit the entry, then leave CAM access mode. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1423
/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	/* CAMC_VCAMSL selects the VLAN CAM bank (vs. multicast). */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	/* Commit the entry, then leave CAM access mode. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1451
/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* Enter CAM access mode before touching the mask register. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1470
/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* Enter VLAN CAM access mode before touching the mask register. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1489
/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}
1510
/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device whose rhine_private holds the active VLAN set
 *
 * Update VLAN CAM filters to match configuration change.
 * (Kernel-doc fixed: the parameter is @dev, not @rp.)
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		/* Only VCAM_SIZE hardware slots; surplus VIDs are not loaded. */
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
1533
/* ndo_vlan_rx_add_vid: record @vid and mirror the set into the VLAN CAM. */
static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}
1544
Patrick McHardy80d5c362013-04-19 02:04:28 +00001545static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001546{
1547 struct rhine_private *rp = netdev_priv(dev);
1548
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001549 spin_lock_bh(&rp->lock);
Roger Luethi38f49e82010-12-06 00:59:40 +00001550 clear_bit(vid, rp->active_vlans);
1551 rhine_update_vcam(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001552 spin_unlock_bh(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001553 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001554}
1555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556static void init_registers(struct net_device *dev)
1557{
1558 struct rhine_private *rp = netdev_priv(dev);
1559 void __iomem *ioaddr = rp->base;
1560 int i;
1561
1562 for (i = 0; i < 6; i++)
1563 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1564
1565 /* Initialize other registers. */
1566 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1567 /* Configure initial FIFO thresholds. */
1568 iowrite8(0x20, ioaddr + TxConfig);
1569 rp->tx_thresh = 0x20;
1570 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1571
1572 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1573 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1574
1575 rhine_set_rx_mode(dev);
1576
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001577 if (rp->quirks & rqMgmt)
Roger Luethi38f49e82010-12-06 00:59:40 +00001578 rhine_init_cam_filter(dev);
1579
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001580 napi_enable(&rp->napi);
Stephen Hemmingerab197662006-08-14 23:00:18 -07001581
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001582 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
1584 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1585 ioaddr + ChipCmd);
1586 rhine_check_media(dev, 1);
1587}
1588
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	/* Clear any MII command in flight, select BMSR, then start the
	 * poll (NOTE(review): MIICmd bit 0x80 presumably triggers the
	 * read/poll — confirm against the Rhine datasheet).
	 */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Wait for the chip to acknowledge (bit 0x20 in MIIRegAddr). */
	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	/* NOTE(review): bit 0x40 appears to request continuous
	 * auto-polling of BMSR — confirm against the datasheet.
	 */
	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1602
1603/* Disable MII link status auto-polling (required for MDIO access) */
Francois Romieua384a332012-01-07 22:19:36 +01001604static void rhine_disable_linkmon(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605{
Francois Romieua384a332012-01-07 22:19:36 +01001606 void __iomem *ioaddr = rp->base;
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 iowrite8(0, ioaddr + MIICmd);
1609
Francois Romieua384a332012-01-07 22:19:36 +01001610 if (rp->quirks & rqRhineI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1612
John W. Linville38bb6b22006-05-19 10:51:21 -04001613 /* Can be called from ISR. Evil. */
1614 mdelay(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
1616 /* 0x80 must be set immediately before turning it off */
1617 iowrite8(0x80, ioaddr + MIICmd);
1618
Francois Romieua384a332012-01-07 22:19:36 +01001619 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
1621 /* Heh. Now clear 0x80 again. */
1622 iowrite8(0, ioaddr + MIICmd);
1623 }
1624 else
Francois Romieua384a332012-01-07 22:19:36 +01001625 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626}
1627
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/* Read PHY register @regnum of PHY @phy_id.  Link auto-polling must be
 * (and is) suspended around the access, since both share the MII unit.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* The chip clears the trigger bit when the read completes. */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}
1648
/* Write @value to PHY register @regnum of PHY @phy_id.  Link
 * auto-polling is suspended for the duration of the access.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	/* The chip clears the trigger bit when the write completes. */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}
1665
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001666static void rhine_task_disable(struct rhine_private *rp)
1667{
1668 mutex_lock(&rp->task_lock);
1669 rp->task_enable = false;
1670 mutex_unlock(&rp->task_lock);
1671
1672 cancel_work_sync(&rp->slow_event_task);
1673 cancel_work_sync(&rp->reset_task);
1674}
1675
/* Re-allow the deferred work items (reset_task / slow_event_task) to
 * run; the flag is flipped under task_lock to pair with the tasks'
 * own locked check.
 */
static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}
1682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683static int rhine_open(struct net_device *dev)
1684{
1685 struct rhine_private *rp = netdev_priv(dev);
1686 void __iomem *ioaddr = rp->base;
1687 int rc;
1688
Alexey Charkovf7630d12014-04-22 19:28:08 +04001689 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 if (rc)
françois romieu4d1fd9c2015-05-01 22:14:40 +02001691 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Alexey Charkovf7630d12014-04-22 19:28:08 +04001693 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
1695 rc = alloc_ring(dev);
françois romieu4d1fd9c2015-05-01 22:14:40 +02001696 if (rc < 0)
1697 goto out_free_irq;
1698
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 alloc_rbufs(dev);
1700 alloc_tbufs(dev);
1701 rhine_chip_reset(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001702 rhine_task_enable(rp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 init_registers(dev);
Francois Romieufc3e0f82012-01-07 22:39:37 +01001704
1705 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1706 __func__, ioread16(ioaddr + ChipCmd),
1707 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 netif_start_queue(dev);
1710
françois romieu4d1fd9c2015-05-01 22:14:40 +02001711out:
1712 return rc;
1713
1714out_free_irq:
1715 free_irq(rp->irq, dev);
1716 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717}
1718
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001719static void rhine_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001721 struct rhine_private *rp = container_of(work, struct rhine_private,
1722 reset_task);
1723 struct net_device *dev = rp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001725 mutex_lock(&rp->task_lock);
1726
1727 if (!rp->task_enable)
1728 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001730 napi_disable(&rp->napi);
Richard Weinbergera9265922014-01-14 22:46:36 +01001731 netif_tx_disable(dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001732 spin_lock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
1734 /* clear all descriptors */
1735 free_tbufs(dev);
1736 free_rbufs(dev);
1737 alloc_tbufs(dev);
1738 alloc_rbufs(dev);
1739
1740 /* Reinitialize the hardware. */
1741 rhine_chip_reset(dev);
1742 init_registers(dev);
1743
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001744 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001746 dev->trans_start = jiffies; /* prevent tx timeout */
Eric Dumazet553e2332009-05-27 10:34:50 +00001747 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 netif_wake_queue(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001749
1750out_unlock:
1751 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752}
1753
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001754static void rhine_tx_timeout(struct net_device *dev)
1755{
1756 struct rhine_private *rp = netdev_priv(dev);
1757 void __iomem *ioaddr = rp->base;
1758
Joe Perchesdf4511f2011-04-16 14:15:25 +00001759 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1760 ioread16(ioaddr + IntrStatus),
1761 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001762
1763 schedule_work(&rp->reset_task);
1764}
1765
Stephen Hemminger613573252009-08-31 19:50:58 +00001766static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1767 struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
1769 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001770 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 void __iomem *ioaddr = rp->base;
1772 unsigned entry;
1773
1774 /* Caution: the write order is important here, set the field
1775 with the "ownership" bits last. */
1776
1777 /* Calculate the next Tx descriptor entry. */
1778 entry = rp->cur_tx % TX_RING_SIZE;
1779
Herbert Xu5b057c62006-06-23 02:06:41 -07001780 if (skb_padto(skb, ETH_ZLEN))
Patrick McHardy6ed10652009-06-23 06:03:08 +00001781 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
1783 rp->tx_skbuff[entry] = skb;
1784
1785 if ((rp->quirks & rqRhineI) &&
Patrick McHardy84fa7932006-08-29 16:44:56 -07001786 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 /* Must use alignment buffer. */
1788 if (skb->len > PKT_BUF_SZ) {
1789 /* packet too long, drop it */
Eric W. Biederman4b3afc62014-03-15 18:22:47 -07001790 dev_kfree_skb_any(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 rp->tx_skbuff[entry] = NULL;
Eric Dumazet553e2332009-05-27 10:34:50 +00001792 dev->stats.tx_dropped++;
Patrick McHardy6ed10652009-06-23 06:03:08 +00001793 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
Craig Brind3e0d1672006-04-27 02:30:46 -07001795
1796 /* Padding is not copied and so must be redone. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
Craig Brind3e0d1672006-04-27 02:30:46 -07001798 if (skb->len < ETH_ZLEN)
1799 memset(rp->tx_buf[entry] + skb->len, 0,
1800 ETH_ZLEN - skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 rp->tx_skbuff_dma[entry] = 0;
1802 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1803 (rp->tx_buf[entry] -
1804 rp->tx_bufs));
1805 } else {
1806 rp->tx_skbuff_dma[entry] =
Alexey Charkovf7630d12014-04-22 19:28:08 +04001807 dma_map_single(hwdev, skb->data, skb->len,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001808 DMA_TO_DEVICE);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001809 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
Eric W. Biederman4b3afc62014-03-15 18:22:47 -07001810 dev_kfree_skb_any(skb);
Neil Horman9b4fe5f2013-07-12 13:35:33 -04001811 rp->tx_skbuff_dma[entry] = 0;
1812 dev->stats.tx_dropped++;
1813 return NETDEV_TX_OK;
1814 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1816 }
1817
1818 rp->tx_ring[entry].desc_length =
1819 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1820
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001821 if (unlikely(skb_vlan_tag_present(skb))) {
1822 u16 vid_pcp = skb_vlan_tag_get(skb);
Roger Luethi207070f2013-09-21 14:24:11 +02001823
1824 /* drop CFI/DEI bit, register needs VID and PCP */
1825 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1826 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1827 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
Roger Luethi38f49e82010-12-06 00:59:40 +00001828 /* request tagging */
1829 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1830 }
1831 else
1832 rp->tx_ring[entry].tx_status = 0;
1833
Tino Reichardt92bf2002015-02-24 10:28:01 -08001834 netdev_sent_queue(dev, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 /* lock eth irq */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 wmb();
Roger Luethi38f49e82010-12-06 00:59:40 +00001837 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 wmb();
1839
1840 rp->cur_tx++;
1841
1842 /* Non-x86 Todo: explicitly flush cache lines here. */
1843
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001844 if (skb_vlan_tag_present(skb))
Roger Luethi38f49e82010-12-06 00:59:40 +00001845 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1846 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 /* Wake the potentially-idle transmit channel */
1849 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1850 ioaddr + ChipCmd1);
1851 IOSYNC;
1852
1853 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1854 netif_stop_queue(dev);
1855
Francois Romieufc3e0f82012-01-07 22:39:37 +01001856 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1857 rp->cur_tx - 1, entry);
1858
Patrick McHardy6ed10652009-06-23 06:03:08 +00001859 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860}
1861
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001862static void rhine_irq_disable(struct rhine_private *rp)
1863{
1864 iowrite16(0x0000, rp->base + IntrEnable);
1865 mmiowb();
1866}
1867
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868/* The interrupt handler does all of the Rx thread work and cleans up
1869 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +01001870static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871{
1872 struct net_device *dev = dev_instance;
1873 struct rhine_private *rp = netdev_priv(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001874 u32 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 int handled = 0;
1876
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001877 status = rhine_get_events(rp);
1878
Francois Romieufc3e0f82012-01-07 22:39:37 +01001879 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001880
1881 if (status & RHINE_EVENT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 handled = 1;
1883
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001884 rhine_irq_disable(rp);
1885 napi_schedule(&rp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 }
1887
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001888 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
Francois Romieufc3e0f82012-01-07 22:39:37 +01001889 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1890 status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001891 }
1892
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 return IRQ_RETVAL(handled);
1894}
1895
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */

/* Reclaim completed Tx descriptors: accumulate error/collision
 * statistics, unmap DMA buffers, free skbs and advance dirty_tx.
 * Stops at the first descriptor still owned by the chip, and wakes
 * the queue once enough slots are free.
 */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct sk_buff *skb;

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			/* Per-bit error accounting follows. */
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* Underflow/abort: give ownership back and retry. */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* Success: collision count encoding differs on Rhine-I. */
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb.  dma==0 means the Rhine-I
		 * bounce buffer was used, so there is nothing to unmap.
		 */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}

	/* BQL completion accounting. */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}
1964
Roger Luethi38f49e82010-12-06 00:59:40 +00001965/**
1966 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1967 * @skb: pointer to sk_buff
1968 * @data_size: used data area of the buffer including CRC
1969 *
1970 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1971 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1972 * aligned following the CRC.
1973 */
1974static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1975{
1976 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
Harvey Harrison4562b2f2011-03-28 17:08:59 +00001977 return be16_to_cpup((__be16 *)trailer);
Roger Luethi38f49e82010-12-06 00:59:40 +00001978}
1979
Roger Luethi633949a2006-08-14 23:00:17 -07001980/* Process up to limit frames from receive ring */
1981static int rhine_rx(struct net_device *dev, int limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982{
1983 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001984 struct device *hwdev = dev->dev.parent;
Roger Luethi633949a2006-08-14 23:00:17 -07001985 int count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 int entry = rp->cur_rx % RX_RING_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
Francois Romieufc3e0f82012-01-07 22:39:37 +01001988 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1989 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991 /* If EOP is set on the next entry, it's a new packet. Send it up. */
Roger Luethi633949a2006-08-14 23:00:17 -07001992 for (count = 0; count < limit; ++count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 struct rx_desc *desc = rp->rx_head_desc;
1994 u32 desc_status = le32_to_cpu(desc->rx_status);
Roger Luethi38f49e82010-12-06 00:59:40 +00001995 u32 desc_length = le32_to_cpu(desc->desc_length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 int data_size = desc_status >> 16;
1997
Roger Luethi633949a2006-08-14 23:00:17 -07001998 if (desc_status & DescOwn)
1999 break;
2000
Francois Romieufc3e0f82012-01-07 22:39:37 +01002001 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2002 desc_status);
Roger Luethi633949a2006-08-14 23:00:17 -07002003
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2005 if ((desc_status & RxWholePkt) != RxWholePkt) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00002006 netdev_warn(dev,
2007 "Oversized Ethernet frame spanned multiple buffers, "
2008 "entry %#x length %d status %08x!\n",
2009 entry, data_size,
2010 desc_status);
2011 netdev_warn(dev,
2012 "Oversized Ethernet frame %p vs %p\n",
2013 rp->rx_head_desc,
2014 &rp->rx_ring[entry]);
Eric Dumazet553e2332009-05-27 10:34:50 +00002015 dev->stats.rx_length_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 } else if (desc_status & RxErr) {
2017 /* There was a error. */
Francois Romieufc3e0f82012-01-07 22:39:37 +01002018 netif_dbg(rp, rx_err, dev,
2019 "%s() Rx error %08x\n", __func__,
2020 desc_status);
Eric Dumazet553e2332009-05-27 10:34:50 +00002021 dev->stats.rx_errors++;
2022 if (desc_status & 0x0030)
2023 dev->stats.rx_length_errors++;
2024 if (desc_status & 0x0048)
2025 dev->stats.rx_fifo_errors++;
2026 if (desc_status & 0x0004)
2027 dev->stats.rx_frame_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 if (desc_status & 0x0002) {
2029 /* this can also be updated outside the interrupt handler */
2030 spin_lock(&rp->lock);
Eric Dumazet553e2332009-05-27 10:34:50 +00002031 dev->stats.rx_crc_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 spin_unlock(&rp->lock);
2033 }
2034 }
2035 } else {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002036 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 /* Length should omit the CRC */
2038 int pkt_len = data_size - 4;
Roger Luethi38f49e82010-12-06 00:59:40 +00002039 u16 vlan_tci = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040
2041 /* Check if the packet is long enough to accept without
2042 copying to a minimally-sized skbuff. */
Eric Dumazet89d71a62009-10-13 05:34:20 +00002043 if (pkt_len < rx_copybreak)
2044 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2045 if (skb) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04002046 dma_sync_single_for_cpu(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04002047 rp->rx_skbuff_dma[entry],
2048 rp->rx_buf_sz,
2049 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
David S. Miller8c7b7fa2007-07-10 22:08:12 -07002051 skb_copy_to_linear_data(skb,
David S. Miller689be432005-06-28 15:25:31 -07002052 rp->rx_skbuff[entry]->data,
David S. Miller8c7b7fa2007-07-10 22:08:12 -07002053 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 skb_put(skb, pkt_len);
Alexey Charkovf7630d12014-04-22 19:28:08 +04002055 dma_sync_single_for_device(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04002056 rp->rx_skbuff_dma[entry],
2057 rp->rx_buf_sz,
2058 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 } else {
2060 skb = rp->rx_skbuff[entry];
2061 if (skb == NULL) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00002062 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 break;
2064 }
2065 rp->rx_skbuff[entry] = NULL;
2066 skb_put(skb, pkt_len);
Alexey Charkovf7630d12014-04-22 19:28:08 +04002067 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 rp->rx_skbuff_dma[entry],
2069 rp->rx_buf_sz,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04002070 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 }
Roger Luethi38f49e82010-12-06 00:59:40 +00002072
2073 if (unlikely(desc_length & DescTag))
2074 vlan_tci = rhine_get_vlan_tci(skb, data_size);
2075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 skb->protocol = eth_type_trans(skb, dev);
Roger Luethi38f49e82010-12-06 00:59:40 +00002077
2078 if (unlikely(desc_length & DescTag))
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002079 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
Roger Luethi633949a2006-08-14 23:00:17 -07002080 netif_receive_skb(skb);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002081
2082 u64_stats_update_begin(&rp->rx_stats.syncp);
2083 rp->rx_stats.bytes += pkt_len;
2084 rp->rx_stats.packets++;
2085 u64_stats_update_end(&rp->rx_stats.syncp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 }
2087 entry = (++rp->cur_rx) % RX_RING_SIZE;
2088 rp->rx_head_desc = &rp->rx_ring[entry];
2089 }
2090
2091 /* Refill the Rx ring buffers. */
2092 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
2093 struct sk_buff *skb;
2094 entry = rp->dirty_rx % RX_RING_SIZE;
2095 if (rp->rx_skbuff[entry] == NULL) {
Kevin Lob26b5552008-08-27 11:35:09 +08002096 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 rp->rx_skbuff[entry] = skb;
2098 if (skb == NULL)
2099 break; /* Better luck next round. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 rp->rx_skbuff_dma[entry] =
Alexey Charkovf7630d12014-04-22 19:28:08 +04002101 dma_map_single(hwdev, skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 rp->rx_buf_sz,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04002103 DMA_FROM_DEVICE);
Alexey Charkovf7630d12014-04-22 19:28:08 +04002104 if (dma_mapping_error(hwdev,
2105 rp->rx_skbuff_dma[entry])) {
Neil Horman9b4fe5f2013-07-12 13:35:33 -04002106 dev_kfree_skb(skb);
2107 rp->rx_skbuff_dma[entry] = 0;
2108 break;
2109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
françois romieue45af492015-05-01 22:14:38 +02002111 dma_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 }
2113 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2114 }
Roger Luethi633949a2006-08-14 23:00:17 -07002115
2116 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117}
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119static void rhine_restart_tx(struct net_device *dev) {
2120 struct rhine_private *rp = netdev_priv(dev);
2121 void __iomem *ioaddr = rp->base;
2122 int entry = rp->dirty_tx % TX_RING_SIZE;
2123 u32 intr_status;
2124
2125 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002126 * If new errors occurred, we need to sort them out before doing Tx.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 * In that case the ISR will be back here RSN anyway.
2128 */
Francois Romieua20a28b2011-12-30 14:53:58 +01002129 intr_status = rhine_get_events(rp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 if ((intr_status & IntrTxErrSummary) == 0) {
2132
2133 /* We know better than the chip where it should continue. */
2134 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2135 ioaddr + TxRingPtr);
2136
2137 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2138 ioaddr + ChipCmd);
Roger Luethi38f49e82010-12-06 00:59:40 +00002139
2140 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2141 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2142 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2145 ioaddr + ChipCmd1);
2146 IOSYNC;
2147 }
2148 else {
2149 /* This should never happen */
Francois Romieufc3e0f82012-01-07 22:39:37 +01002150 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2151 intr_status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 }
2153
2154}
2155
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002156static void rhine_slow_event_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157{
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002158 struct rhine_private *rp =
2159 container_of(work, struct rhine_private, slow_event_task);
2160 struct net_device *dev = rp->dev;
2161 u32 intr_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002163 mutex_lock(&rp->task_lock);
2164
2165 if (!rp->task_enable)
2166 goto out_unlock;
2167
2168 intr_status = rhine_get_events(rp);
2169 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
2171 if (intr_status & IntrLinkChange)
John W. Linville38bb6b22006-05-19 10:51:21 -04002172 rhine_check_media(dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Francois Romieufc3e0f82012-01-07 22:39:37 +01002174 if (intr_status & IntrPCIErr)
2175 netif_warn(rp, hw, dev, "PCI error\n");
2176
David S. Miller559bcac2013-01-29 22:58:04 -05002177 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002179out_unlock:
2180 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181}
2182
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002183static struct rtnl_link_stats64 *
2184rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
2186 struct rhine_private *rp = netdev_priv(dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002187 unsigned int start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002189 spin_lock_bh(&rp->lock);
2190 rhine_update_rx_crc_and_missed_errord(rp);
2191 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002193 netdev_stats_to_stats64(stats, &dev->stats);
2194
2195 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07002196 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002197 stats->rx_packets = rp->rx_stats.packets;
2198 stats->rx_bytes = rp->rx_stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -07002199 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002200
2201 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07002202 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002203 stats->tx_packets = rp->tx_stats.packets;
2204 stats->tx_bytes = rp->tx_stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -07002205 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002206
2207 return stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
2209
/*
 * ndo_set_rx_mode: program the receive filter to match dev->flags and
 * the current multicast list.
 *
 * Four mutually exclusive filtering strategies, checked in order:
 *   1. promiscuous        - accept everything (rx_mode 0x1C);
 *   2. too many groups or IFF_ALLMULTI - open the multicast hash wide;
 *   3. management chips (rqMgmt, 6105M) - exact-match multicast CAM;
 *   4. otherwise          - 64-bit CRC hash filter.
 * Finally VLAN filtering is toggled (rqMgmt only) and the chosen rx_mode
 * bits are OR-ed into RxConfig.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		/* Exact-match filtering: one CAM slot per group address,
		 * up to MCAM_SIZE entries; surplus groups are dropped.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Classic 64-bin hash filter indexed by the top 6 bits of
		 * the Ethernet CRC of each group address.
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2257
2258static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2259{
Alexey Charkovf7630d12014-04-22 19:28:08 +04002260 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
Rick Jones23020ab2011-11-09 09:58:07 +00002262 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2263 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Alexey Charkovf7630d12014-04-22 19:28:08 +04002264 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265}
2266
2267static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2268{
2269 struct rhine_private *rp = netdev_priv(dev);
2270 int rc;
2271
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002272 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 rc = mii_ethtool_gset(&rp->mii_if, cmd);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002274 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
2276 return rc;
2277}
2278
2279static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2280{
2281 struct rhine_private *rp = netdev_priv(dev);
2282 int rc;
2283
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002284 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 rc = mii_ethtool_sset(&rp->mii_if, cmd);
Roger Luethi00b428c2006-03-28 20:53:56 +02002286 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002287 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
2289 return rc;
2290}
2291
2292static int netdev_nway_reset(struct net_device *dev)
2293{
2294 struct rhine_private *rp = netdev_priv(dev);
2295
2296 return mii_nway_restart(&rp->mii_if);
2297}
2298
2299static u32 netdev_get_link(struct net_device *dev)
2300{
2301 struct rhine_private *rp = netdev_priv(dev);
2302
2303 return mii_link_ok(&rp->mii_if);
2304}
2305
2306static u32 netdev_get_msglevel(struct net_device *dev)
2307{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002308 struct rhine_private *rp = netdev_priv(dev);
2309
2310 return rp->msg_enable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311}
2312
2313static void netdev_set_msglevel(struct net_device *dev, u32 value)
2314{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002315 struct rhine_private *rp = netdev_priv(dev);
2316
2317 rp->msg_enable = value;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
2319
2320static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2321{
2322 struct rhine_private *rp = netdev_priv(dev);
2323
2324 if (!(rp->quirks & rqWOL))
2325 return;
2326
2327 spin_lock_irq(&rp->lock);
2328 wol->supported = WAKE_PHY | WAKE_MAGIC |
2329 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2330 wol->wolopts = rp->wolopts;
2331 spin_unlock_irq(&rp->lock);
2332}
2333
2334static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2335{
2336 struct rhine_private *rp = netdev_priv(dev);
2337 u32 support = WAKE_PHY | WAKE_MAGIC |
2338 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2339
2340 if (!(rp->quirks & rqWOL))
2341 return -EINVAL;
2342
2343 if (wol->wolopts & ~support)
2344 return -EINVAL;
2345
2346 spin_lock_irq(&rp->lock);
2347 rp->wolopts = wol->wolopts;
2348 spin_unlock_irq(&rp->lock);
2349
2350 return 0;
2351}
2352
Jeff Garzik7282d492006-09-13 14:30:00 -04002353static const struct ethtool_ops netdev_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 .get_drvinfo = netdev_get_drvinfo,
2355 .get_settings = netdev_get_settings,
2356 .set_settings = netdev_set_settings,
2357 .nway_reset = netdev_nway_reset,
2358 .get_link = netdev_get_link,
2359 .get_msglevel = netdev_get_msglevel,
2360 .set_msglevel = netdev_set_msglevel,
2361 .get_wol = rhine_get_wol,
2362 .set_wol = rhine_set_wol,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363};
2364
2365static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2366{
2367 struct rhine_private *rp = netdev_priv(dev);
2368 int rc;
2369
2370 if (!netif_running(dev))
2371 return -EINVAL;
2372
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002373 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
Roger Luethi00b428c2006-03-28 20:53:56 +02002375 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002376 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377
2378 return rc;
2379}
2380
/*
 * ndo_stop: quiesce and release the device.
 *
 * Order matters: first stop all software activity (deferred work, NAPI,
 * TX queue), then park the chip (loopback, IRQs off, Tx/Rx stopped),
 * and only then release the IRQ and free the DMA buffers and rings.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	/* No interrupts can arrive past this point; safe to free memory. */
	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2408
2409
Alexey Charkov2d283862014-04-22 19:28:09 +04002410static void rhine_remove_one_pci(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411{
2412 struct net_device *dev = pci_get_drvdata(pdev);
2413 struct rhine_private *rp = netdev_priv(dev);
2414
2415 unregister_netdev(dev);
2416
2417 pci_iounmap(pdev, rp->base);
2418 pci_release_regions(pdev);
2419
2420 free_netdev(dev);
2421 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422}
2423
Alexey Charkov2d283862014-04-22 19:28:09 +04002424static int rhine_remove_one_platform(struct platform_device *pdev)
2425{
2426 struct net_device *dev = platform_get_drvdata(pdev);
2427 struct rhine_private *rp = netdev_priv(dev);
2428
2429 unregister_netdev(dev);
2430
2431 iounmap(rp->base);
2432
2433 free_netdev(dev);
2434
2435 return 0;
2436}
2437
/*
 * PCI .shutdown (also called from rhine_suspend): arm the chip's
 * Wake-on-LAN logic according to rp->wolopts, then, on a real power-off
 * with avoid_D3 not set, hand the device over to D3hot with wake
 * enabled. No-op on chips without WOL support.
 */
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		/* Sticky bits keep the WOL configuration across the
		 * transition to D3.
		 */
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2488
Francois Romieue92b9b32012-01-07 22:58:27 +01002489#ifdef CONFIG_PM_SLEEP
/*
 * dev_pm_ops .suspend: quiesce a running interface, detach it from the
 * stack, and (for PCI devices) arm WOL / enter low power via
 * rhine_shutdown_pci(). A device that is not up needs no work.
 */
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and IRQs before NAPI so nothing reschedules. */
	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}
2509
Francois Romieue92b9b32012-01-07 22:58:27 +01002510static int rhine_resume(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511{
Alexey Charkovf7630d12014-04-22 19:28:08 +04002512 struct net_device *dev = dev_get_drvdata(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 struct rhine_private *rp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
2515 if (!netif_running(dev))
2516 return 0;
2517
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 enable_mmio(rp->pioaddr, rp->quirks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 rhine_power_init(dev);
2520 free_tbufs(dev);
2521 free_rbufs(dev);
2522 alloc_tbufs(dev);
2523 alloc_rbufs(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002524 rhine_task_enable(rp);
2525 spin_lock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 init_registers(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002527 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
2529 netif_device_attach(dev);
2530
2531 return 0;
2532}
Francois Romieue92b9b32012-01-07 22:58:27 +01002533
/* With CONFIG_PM_SLEEP the suspend/resume callbacks are wired into a
 * dev_pm_ops; without it, RHINE_PM_OPS collapses to NULL so the bus
 * drivers register no PM hooks.
 */
static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS (&rhine_pm_ops)

#else

#define RHINE_PM_OPS NULL

#endif /* !CONFIG_PM_SLEEP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
Alexey Charkov2d283862014-04-22 19:28:09 +04002543static struct pci_driver rhine_driver_pci = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 .name = DRV_NAME,
2545 .id_table = rhine_pci_tbl,
Alexey Charkov2d283862014-04-22 19:28:09 +04002546 .probe = rhine_init_one_pci,
2547 .remove = rhine_remove_one_pci,
2548 .shutdown = rhine_shutdown_pci,
Francois Romieue92b9b32012-01-07 22:58:27 +01002549 .driver.pm = RHINE_PM_OPS,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550};
2551
Alexey Charkov2d283862014-04-22 19:28:09 +04002552static struct platform_driver rhine_driver_platform = {
2553 .probe = rhine_init_one_platform,
2554 .remove = rhine_remove_one_platform,
2555 .driver = {
2556 .name = DRV_NAME,
Alexey Charkov2d283862014-04-22 19:28:09 +04002557 .of_match_table = rhine_of_tbl,
2558 .pm = RHINE_PM_OPS,
2559 }
2560};
2561
Sachin Kamat77273ea2013-08-07 16:08:16 +05302562static struct dmi_system_id rhine_dmi_table[] __initdata = {
Roger Luethie84df482007-03-06 19:57:37 +01002563 {
2564 .ident = "EPIA-M",
2565 .matches = {
2566 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2567 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2568 },
2569 },
2570 {
2571 .ident = "KV7",
2572 .matches = {
2573 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2574 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2575 },
2576 },
2577 { NULL }
2578};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579
2580static int __init rhine_init(void)
2581{
Alexey Charkov2d283862014-04-22 19:28:09 +04002582 int ret_pci, ret_platform;
2583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584/* when a module, this is printed whether or not devices are found in probe */
2585#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002586 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587#endif
Roger Luethie84df482007-03-06 19:57:37 +01002588 if (dmi_check_system(rhine_dmi_table)) {
2589 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002590 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002591 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002592 }
2593 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002594 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002595
Alexey Charkov2d283862014-04-22 19:28:09 +04002596 ret_pci = pci_register_driver(&rhine_driver_pci);
2597 ret_platform = platform_driver_register(&rhine_driver_platform);
2598 if ((ret_pci < 0) && (ret_platform < 0))
2599 return ret_pci;
2600
2601 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602}
2603
2604
/* Module exit: unregister both bus drivers (reverse of rhine_init). */
static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}
2610
2611
/* Module entry/exit hooks. */
module_init(rhine_init);
module_exit(rhine_cleanup);