/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.1"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif
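
/*
 * Example (an assumed invocation, not from this file): loading the module
 * with "modprobe via-rhine rx_copybreak=200" makes the driver copy any
 * received frame shorter than 200 bytes into a freshly allocated skbuff
 * and recycle the original full-sized Rx buffer in place.
 */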

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
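
/*
 * A sketch of the 8390-style hashing this limit guards (assumed from the
 * filter's heritage; the details live in rhine_set_rx_mode(), outside this
 * excerpt): each multicast address is reduced to one of the 64 hash-table
 * bits by taking the top six bits of the Ethernet CRC of the address:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 *
 * Past multicast_filter_limit addresses the driver falls back to
 * receive-all-multicast instead of programming individual hash bits.
 */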


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
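
/*
 * Illustration of the power-of-two point above (a sketch; the index names
 * mirror struct rhine_private further down):
 *
 *	unsigned int entry = rp->cur_tx % TX_RING_SIZE;
 *
 * With TX_RING_SIZE == 16 the compiler lowers this to (rp->cur_tx & 15),
 * avoiding a division on every queued packet.
 */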

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III parts listed
in the header above.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
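
/*
 * A compressed sketch of the IIId scheme above (field names follow struct
 * rhine_private below; this is an illustration, not a verbatim copy of
 * rhine_tx(), and the exact wake threshold is an assumption):
 *
 *	while (rp->dirty_tx != rp->cur_tx) {
 *		unsigned int entry = rp->dirty_tx % TX_RING_SIZE;
 *
 *		if (rp->tx_ring[entry].tx_status & cpu_to_le32(DescOwn))
 *			break;		// chip still owns this descriptor
 *		... reap stats, unmap and free rp->tx_skbuff[entry] ...
 *		rp->dirty_tx++;
 *	}
 *	if (netif_queue_stopped(dev) &&
 *	    rp->cur_tx - rp->dirty_tx < TX_QUEUE_LEN - 4)
 *		netif_wake_queue(dev);
 */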


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
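
/*
 * Usage note: a dummy read from any chip register flushes posted writes
 * out to the device. rhine_chip_reset() below relies on this by issuing
 * IOSYNC right after writing Cmd1Reset, so the command has reached the
 * chip before its completion status is polled.
 */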

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store chip revision
 * (for quirks etc.)
 */
static struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = (void *)0x84 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace	= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
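
/*
 * Illustration of how a Tx descriptor is armed (a sketch in the spirit of
 * rhine_start_tx(), not a verbatim quote): the frame length goes into bits
 * 0-10 on top of the TXDESC template, then ownership is handed to the chip:
 *
 *	rp->tx_ring[entry].desc_length =
 *		cpu_to_le32(TXDESC | (skb->len & 0x07FF));
 *	wmb();
 *	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 */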

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64	packets;
	u64	bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int revision;
	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
506
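/*
 * Usage example for the read-modify-write helpers above, as used by
 * rhine_kick_tx_threshold() further down: clear the bits in the 0x80 mask
 * field of TxConfig and or in the new threshold value:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * Each invocation performs one read and one write, so the helpers are not
 * atomic with respect to concurrent register accesses.
 */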
507
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
	       struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
	mmiowb();
}
580
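/*
 * Layout of the 32-bit event word assembled by rhine_get_events(): bits
 * 0-15 come straight from IntrStatus, and on chips with the write-back
 * race quirk IntrStatus2 is shifted into bits 16-23. That is how
 * IntrTxDescRace (0x080000, bit 3 of IntrStatus2) lands in a single
 * status value which rhine_ack_events() splits back into both registers.
 */
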
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
802
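/*
 * The masks above partition the interrupt sources into a fast path and a
 * slow path: everything in RHINE_EVENT_NAPI is acknowledged and handled
 * directly in rhine_napipoll(), while the rare RHINE_EVENT_SLOW sources
 * (PCI error, link change) are left unacknowledged, masked out of
 * IntrEnable and deferred to slow_event_task, which runs in process
 * context and may sleep.
 */
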
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete(napi);
		iowrite16(enable_mask, ioaddr + IntrEnable);
		mmiowb();
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, int revision,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->revision = revision;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = 0;
	name = "Rhine";
	if (revision < VTunknown0) {
		rp->quirks = rqRhineI;
	} else if (revision >= VT6102) {
		rp->quirks = rqWOL | rqForceReset;
		if (revision < VT6105) {
			name = "Rhine II";
			rp->quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		} else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (revision >= VT6105_B0)
				rp->quirks |= rq6patterns;
			if (revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->revision >= VT6105M)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name, (long)ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int i, rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
	u32 quirks = pdev->revision < VTunknown0 ? rqRhineI : 0;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);

		if (a != b) {
			rc = -EIO;
			dev_err(hwdev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	rc = rhine_init_one_common(&pdev->dev, pdev->revision,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static int rhine_init_one_platform(struct platform_device *pdev)
{
	const struct of_device_id *match;
	u32 revision;
	int irq;
	struct resource *res;
	void __iomem *ioaddr;

	match = of_match_device(rhine_of_tbl, &pdev->dev);
	if (!match)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	revision = (u32)match->data;
	if (!revision)
		return -EINVAL;

	return rhine_init_one_common(&pdev->dev, revision,
				     (long)ioaddr, ioaddr, irq);
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
1177
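/*
 * Layout of the single coherent allocation made in alloc_ring() (sizes
 * follow the compile-time constants; each descriptor is 16 bytes):
 *
 *	ring_dma                        ring_dma + RX_RING_SIZE * 16
 *	|                               |
 *	v                               v
 *	+-------------------------------+-------------------------------+
 *	| RX_RING_SIZE x struct rx_desc | TX_RING_SIZE x struct tx_desc |
 *	+-------------------------------+-------------------------------+
 *	rp->rx_ring / rp->rx_ring_dma    rp->tx_ring / rp->tx_ring_dma
 */
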
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;

	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;

		rp->rx_skbuff_dma[i] =
			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
			rp->rx_skbuff_dma[i] = 0;
			dev_kfree_skb(skb);
			break;
		}
		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			dma_unmap_single(hwdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else	/* Let the MII library update carrier status */
		rhine_check_media(dev, 0);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}
1341
Roger Luethi38f49e82010-12-06 00:59:40 +00001342/**
1343 * rhine_set_cam - set CAM multicast filters
1344 * @ioaddr: register block of this Rhine
1345 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1346 * @addr: multicast address (6 bytes)
1347 *
1348 * Load addresses into multicast filters.
1349 */
1350static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1351{
1352 int i;
1353
1354 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1355 wmb();
1356
1357 /* Paranoid -- idx out of range should never happen */
1358 idx &= (MCAM_SIZE - 1);
1359
1360 iowrite8((u8) idx, ioaddr + CamAddr);
1361
1362 for (i = 0; i < 6; i++, addr++)
1363 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1364 udelay(10);
1365 wmb();
1366
1367 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1368 udelay(10);
1369
1370 iowrite8(0, ioaddr + CamCon);
1371}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
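
/*
 * Note: only the first VCAM_SIZE (32) VLAN IDs receive a hardware filter
 * slot.  Any further configured VLANs get no CAM entry, so with VLAN
 * filtering enabled their traffic is presumably dropped by the chip.
 */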

static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	if (rp->revision >= VT6105M)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
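
/*
 * Ordering note: NAPI is enabled before IntrEnable is written, so an
 * event that fires the moment interrupts are unmasked can safely
 * schedule the poll handler.
 */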

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	} else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
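
/*
 * Rationale: while auto-polling is active the chip itself issues MII
 * management cycles, which would race with driver-initiated MDIO access.
 * mdio_read()/mdio_write() below therefore bracket each access with
 * rhine_disable_linkmon()/rhine_enable_linkmon().
 */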

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}

static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}

static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 vid_pcp = vlan_tx_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else
		rp->tx_ring[entry].tx_status = 0;
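
	/*
	 * Worked example for the CFI/DEI squeeze above (illustration
	 * only): a tag of 0x6005 (PCP 3, CFI 0, VID 5) becomes
	 * (0x6005 & VLAN_VID_MASK) | ((0x6005 & VLAN_PRIO_MASK) >> 1)
	 * = 0x3005, i.e. the priority moves from bits 15-13 down to
	 * bits 14-12 and the CFI bit position vanishes, giving the
	 * VID+PCP layout the descriptor expects.
	 */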

	/* The descriptor must be fully written before ownership (DescOwn)
	   is handed to the chip; the barriers keep these writes ordered
	   as seen by the device. */
	wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (vlan_tx_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}

static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}

/* The interrupt handler performs only the cheap checks; all Rx work and
   Tx cleanup is deferred to the NAPI poll handler. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}
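
/*
 * Note that the events read via rhine_get_events() are not acknowledged
 * here: for RHINE_EVENT the interrupt is merely masked, and the NAPI
 * poll handler and the slow-event task ack and service them later.
 */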

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 DMA_TO_DEVICE);
		}
		dev_consume_skb_any(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}
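
/*
 * The wake threshold above leaves a gap of four descriptors below
 * TX_QUEUE_LEN so the queue is not restarted the instant a single slot
 * frees up, which would invite stop/wake thrashing under load.
 */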

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}
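
/*
 * Worked example: for a frame with data_size = 66 (including CRC), the
 * 802.1Q trailer starts at the next 4-byte boundary, (66 + 3) & ~3 = 68;
 * bytes 68-69 hold the TPID and bytes 70-71 the TCI, so the function
 * reads the big-endian u16 at offset 70.
 */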

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			rp->rx_skbuff_dma[entry] =
				dma_map_single(hwdev, skb->data,
					       rp->rx_buf_sz,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(hwdev,
					      rp->rx_skbuff_dma[entry])) {
				dev_kfree_skb(skb);
				rp->rx_skbuff_dma[entry] = 0;
				break;
			}
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
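
/*
 * Copybreak rationale: frames shorter than rx_copybreak are copied into
 * a freshly allocated skb so the original DMA buffer stays mapped and is
 * handed straight back to the chip; larger frames skip the copy and the
 * ring slot is refilled with a new buffer instead.
 */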

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));

	return stats;
}
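
/*
 * The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops
 * above re-read the counters until they observe a consistent snapshot,
 * i.e. no updater was mid-flight between begin and retry; on 64-bit
 * builds this typically costs next to nothing.
 */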

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->revision >= VT6105M) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
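
	/*
	 * For the hash fallback above (pre-6105M parts): the top six bits
	 * of each address CRC pick one of 64 filter bits -- bit_nr >> 5
	 * selects MulticastFilter0 or MulticastFilter1, bit_nr & 31 the
	 * bit within it.  Hash collisions let extra frames through; exact
	 * filtering is left to the stack.
	 */
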
	/* enable/disable VLAN receive filtering */
	if (rp->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

static struct dmi_system_id rhine_dmi_table[] __initdata = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}

	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}
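
/*
 * Note on the dual registration above: module load fails only if both
 * the PCI and the platform driver fail to register; if just one fails,
 * the module stays loaded so the other bus can still probe devices.
 */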

static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}

module_init(rhine_init);
module_exit(rhine_cleanup);