blob: 4e21f5510b90347dfdb71345e42f9b8e4ea7dec3 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2/*
3 Written/copyright 1999-2001 by Donald Becker.
4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
Mark Brownb27a16b2006-02-02 00:00:01 +00006 Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL. License for under other terms may be
14 available. Contact the original author for details.
15
16 The original author may be reached as becker@scyld.com, or at
17 Scyld Computing Corporation
18 410 Severn Ave., Suite 210
19 Annapolis MD 21403
20
21 Support information and updates available at
22 http://www.scyld.com/network/netsemi.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040023 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 TODO:
27 * big endian support with CFG:BEM instead of cpu_to_le32
Linus Torvalds1da177e2005-04-16 15:20:36 -070028*/
29
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/string.h>
33#include <linux/timer.h>
34#include <linux/errno.h>
35#include <linux/ioport.h>
36#include <linux/slab.h>
37#include <linux/interrupt.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/skbuff.h>
42#include <linux/init.h>
43#include <linux/spinlock.h>
44#include <linux/ethtool.h>
45#include <linux/delay.h>
46#include <linux/rtnetlink.h>
47#include <linux/mii.h>
48#include <linux/crc32.h>
49#include <linux/bitops.h>
Mark Brownb27a16b2006-02-02 00:00:01 +000050#include <linux/prefetch.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/processor.h> /* Processor type for cache alignment. */
52#include <asm/io.h>
53#include <asm/irq.h>
54#include <asm/uaccess.h>
55
56#define DRV_NAME "natsemi"
Andy Gospodarekd5b20692006-09-11 17:39:18 -040057#define DRV_VERSION "2.1"
58#define DRV_RELDATE "Sept 11, 2006"
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
60#define RX_OFFSET 2
61
62/* Updated to recommendations in pci-skeleton v2.03. */
63
64/* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
66
67#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
68 NETIF_MSG_LINK | \
69 NETIF_MSG_WOL | \
70 NETIF_MSG_RX_ERR | \
71 NETIF_MSG_TX_ERR)
72static int debug = -1;
73
Linus Torvalds1da177e2005-04-16 15:20:36 -070074static int mtu;
75
76/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77 This chip uses a 512 element hash table based on the Ethernet CRC. */
Arjan van de Venf71e1302006-03-03 21:33:57 -050078static const int multicast_filter_limit = 100;
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81 Setting to > 1518 effectively disables this feature. */
82static int rx_copybreak;
83
84/* Used to pass the media type, etc.
85 Both 'options[]' and 'full_duplex[]' should exist for driver
86 interoperability.
87 The media type is usually passed in 'options[]'.
88*/
89#define MAX_UNITS 8 /* More are supported, limit only on options */
90static int options[MAX_UNITS];
91static int full_duplex[MAX_UNITS];
92
93/* Operational parameters that are set at compile time. */
94
95/* Keep the ring sizes a power of two for compile efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100#define TX_RING_SIZE 16
101#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
102#define RX_RING_SIZE 32
103
104/* Operational parameters that usually are not changed. */
105/* Time in jiffies before concluding the transmitter is hung. */
106#define TX_TIMEOUT (2*HZ)
107
108#define NATSEMI_HW_TIMEOUT 400
109#define NATSEMI_TIMER_FREQ 3*HZ
110#define NATSEMI_PG0_NREGS 64
111#define NATSEMI_RFDR_NREGS 8
112#define NATSEMI_PG1_NREGS 4
113#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
114 NATSEMI_PG1_NREGS)
115#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
116#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
118/* Buffer sizes:
119 * The nic writes 32-bit values, even if the upper bytes of
120 * a 32-bit value are beyond the end of the buffer.
121 */
122#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
123#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
124#define NATSEMI_LONGPKT 1518 /* limit for normal packets */
125#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
126
127/* These identify the driver base version and may not be removed. */
Randy Dunlape19360f2006-04-10 23:22:06 -0700128static const char version[] __devinitdata =
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129 KERN_INFO DRV_NAME " dp8381x driver, version "
130 DRV_VERSION ", " DRV_RELDATE "\n"
131 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
132 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
133 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
134
135MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
136MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
137MODULE_LICENSE("GPL");
138
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139module_param(mtu, int, 0);
140module_param(debug, int, 0);
141module_param(rx_copybreak, int, 0);
142module_param_array(options, int, NULL, 0);
143module_param_array(full_duplex, int, NULL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
145MODULE_PARM_DESC(debug, "DP8381x default debug level");
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400146MODULE_PARM_DESC(rx_copybreak,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 "DP8381x copy breakpoint for copy-only-tiny-frames");
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400148MODULE_PARM_DESC(options,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
150MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
151
152/*
153 Theory of Operation
154
155I. Board Compatibility
156
157This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
It also works with other chips in the DP83810 series.
159
160II. Board-specific settings
161
162This driver requires the PCI interrupt line to be valid.
163It honors the EEPROM-set values.
164
165III. Driver operation
166
167IIIa. Ring buffers
168
169This driver uses two statically allocated fixed-size descriptor lists
170formed into rings by a branch from the final descriptor to the beginning of
171the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
172The NatSemi design uses a 'next descriptor' pointer that the driver forms
173into a list.
174
175IIIb/c. Transmit/Receive Structure
176
177This driver uses a zero-copy receive and transmit scheme.
178The driver allocates full frame size skbuffs for the Rx ring buffers at
179open() time and passes the skb->data field to the chip as receive data
180buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
181a fresh skbuff is allocated and the frame is copied to the new skbuff.
182When the incoming frame is larger, the skbuff is passed directly up the
183protocol stack. Buffers consumed this way are replaced by newly allocated
184skbuffs in a later phase of receives.
185
186The RX_COPYBREAK value is chosen to trade-off the memory wasted by
187using a full-sized skbuff for small frames vs. the copying costs of larger
188frames. New boards are typically used in generously configured machines
189and the underfilled buffers have negligible impact compared to the benefit of
190a single allocation size, so the default value of zero results in never
191copying packets. When copying is done, the cost is usually mitigated by using
192a combined copy/checksum routine. Copying also preloads the cache, which is
193most useful with small frames.
194
195A subtle aspect of the operation is that unaligned buffers are not permitted
196by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
197longword aligned for further processing. On copies frames are put into the
198skbuff at an offset of "+2", 16-byte aligning the IP header.
199
200IIId. Synchronization
201
202Most operations are synchronized on the np->lock irq spinlock, except the
203performance critical codepaths:
204
205The rx process only runs in the interrupt handler. Access from outside
206the interrupt handler is only permitted after disable_irq().
207
Herbert Xu932ff272006-06-09 12:20:56 -0700208The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209is set, then access is permitted under spin_lock_irq(&np->lock).
210
211Thus configuration functions that want to access everything must call
212 disable_irq(dev->irq);
Herbert Xu932ff272006-06-09 12:20:56 -0700213 netif_tx_lock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 spin_lock_irq(&np->lock);
215
216IV. Notes
217
218NatSemi PCI network controllers are very uncommon.
219
220IVb. References
221
222http://www.scyld.com/expert/100mbps.html
223http://www.scyld.com/expert/NWay.html
224Datasheet is available from:
225http://www.national.com/pf/DP/DP83815.html
226
227IVc. Errata
228
229None characterised.
230*/
231
232
233
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234/*
235 * Support for fibre connections on Am79C874:
236 * This phy needs a special setup when connected to a fibre cable.
237 * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
238 */
239#define PHYID_AM79C874 0x0022561b
240
Jeff Garzika2b524b2006-06-26 22:48:38 -0400241enum {
242 MII_MCTRL = 0x15, /* mode control register */
243 MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
244 MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
245};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248/* array of board data directly indexed by pci_tbl[x].driver_data */
static const struct {
	const char *name;		/* board name, printed at probe time */
	unsigned long flags;		/* board-specific flags (currently always 0) */
	unsigned int eeprom_size;	/* EEPROM size; copied to np->eeprom_size at probe */
} natsemi_pci_info[] __devinitdata = {
	{ "NatSemi DP8381[56]", 0, 24 },
};

/* PCI IDs this driver binds to; driver_data indexes natsemi_pci_info above. */
static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
262
263/* Offsets to the device registers.
264 Unlike software-only systems, device drivers interact with complex hardware.
265 It's not useful to define symbolic names for every register bit in the
266 device.
267*/
268enum register_offsets {
269 ChipCmd = 0x00,
270 ChipConfig = 0x04,
271 EECtrl = 0x08,
272 PCIBusCfg = 0x0C,
273 IntrStatus = 0x10,
274 IntrMask = 0x14,
275 IntrEnable = 0x18,
276 IntrHoldoff = 0x1C, /* DP83816 only */
277 TxRingPtr = 0x20,
278 TxConfig = 0x24,
279 RxRingPtr = 0x30,
280 RxConfig = 0x34,
281 ClkRun = 0x3C,
282 WOLCmd = 0x40,
283 PauseCmd = 0x44,
284 RxFilterAddr = 0x48,
285 RxFilterData = 0x4C,
286 BootRomAddr = 0x50,
287 BootRomData = 0x54,
288 SiliconRev = 0x58,
289 StatsCtrl = 0x5C,
290 StatsData = 0x60,
291 RxPktErrs = 0x60,
292 RxMissed = 0x68,
293 RxCRCErrs = 0x64,
294 BasicControl = 0x80,
295 BasicStatus = 0x84,
296 AnegAdv = 0x90,
297 AnegPeer = 0x94,
298 PhyStatus = 0xC0,
299 MIntrCtrl = 0xC4,
300 MIntrStatus = 0xC8,
301 PhyCtrl = 0xE4,
302
303 /* These are from the spec, around page 78... on a separate table.
304 * The meaning of these registers depend on the value of PGSEL. */
305 PGSEL = 0xCC,
306 PMDCSR = 0xE4,
307 TSTDAT = 0xFC,
308 DSPCFG = 0xF4,
309 SDCFG = 0xF8
310};
311/* the values for the 'magic' registers above (PGSEL=1) */
312#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
313#define TSTDAT_VAL 0x0
314#define DSPCFG_VAL 0x5040
315#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
316#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
317#define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
318#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
319
320/* misc PCI space registers */
321enum pci_register_offsets {
322 PCIPM = 0x44,
323};
324
325enum ChipCmd_bits {
326 ChipReset = 0x100,
327 RxReset = 0x20,
328 TxReset = 0x10,
329 RxOff = 0x08,
330 RxOn = 0x04,
331 TxOff = 0x02,
332 TxOn = 0x01,
333};
334
335enum ChipConfig_bits {
336 CfgPhyDis = 0x200,
337 CfgPhyRst = 0x400,
338 CfgExtPhy = 0x1000,
339 CfgAnegEnable = 0x2000,
340 CfgAneg100 = 0x4000,
341 CfgAnegFull = 0x8000,
342 CfgAnegDone = 0x8000000,
343 CfgFullDuplex = 0x20000000,
344 CfgSpeed100 = 0x40000000,
345 CfgLink = 0x80000000,
346};
347
348enum EECtrl_bits {
349 EE_ShiftClk = 0x04,
350 EE_DataIn = 0x01,
351 EE_ChipSelect = 0x08,
352 EE_DataOut = 0x02,
353 MII_Data = 0x10,
354 MII_Write = 0x20,
355 MII_ShiftClk = 0x40,
356};
357
358enum PCIBusCfg_bits {
359 EepromReload = 0x4,
360};
361
362/* Bits in the interrupt status/mask registers. */
363enum IntrStatus_bits {
364 IntrRxDone = 0x0001,
365 IntrRxIntr = 0x0002,
366 IntrRxErr = 0x0004,
367 IntrRxEarly = 0x0008,
368 IntrRxIdle = 0x0010,
369 IntrRxOverrun = 0x0020,
370 IntrTxDone = 0x0040,
371 IntrTxIntr = 0x0080,
372 IntrTxErr = 0x0100,
373 IntrTxIdle = 0x0200,
374 IntrTxUnderrun = 0x0400,
375 StatsMax = 0x0800,
376 SWInt = 0x1000,
377 WOLPkt = 0x2000,
378 LinkChange = 0x4000,
379 IntrHighBits = 0x8000,
380 RxStatusFIFOOver = 0x10000,
381 IntrPCIErr = 0xf00000,
382 RxResetDone = 0x1000000,
383 TxResetDone = 0x2000000,
384 IntrAbnormalSummary = 0xCD20,
385};
386
387/*
388 * Default Interrupts:
389 * Rx OK, Rx Packet Error, Rx Overrun,
390 * Tx OK, Tx Packet Error, Tx Underrun,
391 * MIB Service, Phy Interrupt, High Bits,
392 * Rx Status FIFO overrun,
393 * Received Target Abort, Received Master Abort,
394 * Signalled System Error, Received Parity Error
395 */
396#define DEFAULT_INTR 0x00f1cd65
397
398enum TxConfig_bits {
399 TxDrthMask = 0x3f,
400 TxFlthMask = 0x3f00,
401 TxMxdmaMask = 0x700000,
402 TxMxdma_512 = 0x0,
403 TxMxdma_4 = 0x100000,
404 TxMxdma_8 = 0x200000,
405 TxMxdma_16 = 0x300000,
406 TxMxdma_32 = 0x400000,
407 TxMxdma_64 = 0x500000,
408 TxMxdma_128 = 0x600000,
409 TxMxdma_256 = 0x700000,
410 TxCollRetry = 0x800000,
411 TxAutoPad = 0x10000000,
412 TxMacLoop = 0x20000000,
413 TxHeartIgn = 0x40000000,
414 TxCarrierIgn = 0x80000000
415};
416
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400417/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 * Tx Configuration:
419 * - 256 byte DMA burst length
420 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
421 * - 64 bytes initial drain threshold (i.e. begin actual transmission
422 * when 64 byte are in the fifo)
423 * - on tx underruns, increase drain threshold by 64.
424 * - at most use a drain threshold of 1472 bytes: The sum of the fill
425 * threshold and the drain threshold must be less than 2016 bytes.
426 *
427 */
428#define TX_FLTH_VAL ((512/32) << 8)
429#define TX_DRTH_VAL_START (64/32)
430#define TX_DRTH_VAL_INC 2
431#define TX_DRTH_VAL_LIMIT (1472/32)
432
433enum RxConfig_bits {
434 RxDrthMask = 0x3e,
435 RxMxdmaMask = 0x700000,
436 RxMxdma_512 = 0x0,
437 RxMxdma_4 = 0x100000,
438 RxMxdma_8 = 0x200000,
439 RxMxdma_16 = 0x300000,
440 RxMxdma_32 = 0x400000,
441 RxMxdma_64 = 0x500000,
442 RxMxdma_128 = 0x600000,
443 RxMxdma_256 = 0x700000,
444 RxAcceptLong = 0x8000000,
445 RxAcceptTx = 0x10000000,
446 RxAcceptRunt = 0x40000000,
447 RxAcceptErr = 0x80000000
448};
449#define RX_DRTH_VAL (128/8)
450
451enum ClkRun_bits {
452 PMEEnable = 0x100,
453 PMEStatus = 0x8000,
454};
455
456enum WolCmd_bits {
457 WakePhy = 0x1,
458 WakeUnicast = 0x2,
459 WakeMulticast = 0x4,
460 WakeBroadcast = 0x8,
461 WakeArp = 0x10,
462 WakePMatch0 = 0x20,
463 WakePMatch1 = 0x40,
464 WakePMatch2 = 0x80,
465 WakePMatch3 = 0x100,
466 WakeMagic = 0x200,
467 WakeMagicSecure = 0x400,
468 SecureHack = 0x100000,
469 WokePhy = 0x400000,
470 WokeUnicast = 0x800000,
471 WokeMulticast = 0x1000000,
472 WokeBroadcast = 0x2000000,
473 WokeArp = 0x4000000,
474 WokePMatch0 = 0x8000000,
475 WokePMatch1 = 0x10000000,
476 WokePMatch2 = 0x20000000,
477 WokePMatch3 = 0x40000000,
478 WokeMagic = 0x80000000,
479 WakeOptsSummary = 0x7ff
480};
481
482enum RxFilterAddr_bits {
483 RFCRAddressMask = 0x3ff,
484 AcceptMulticast = 0x00200000,
485 AcceptMyPhys = 0x08000000,
486 AcceptAllPhys = 0x10000000,
487 AcceptAllMulticast = 0x20000000,
488 AcceptBroadcast = 0x40000000,
489 RxFilterEnable = 0x80000000
490};
491
492enum StatsCtrl_bits {
493 StatsWarn = 0x1,
494 StatsFreeze = 0x2,
495 StatsClear = 0x4,
496 StatsStrobe = 0x8,
497};
498
499enum MIntrCtrl_bits {
500 MICRIntEn = 0x2,
501};
502
503enum PhyCtrl_bits {
504 PhyAddrMask = 0x1f,
505};
506
507#define PHY_ADDR_NONE 32
508#define PHY_ADDR_INTERNAL 1
509
510/* values we might find in the silicon revision register */
511#define SRR_DP83815_C 0x0302
512#define SRR_DP83815_D 0x0403
513#define SRR_DP83816_A4 0x0504
514#define SRR_DP83816_A5 0x0505
515
516/* The Rx and Tx buffer descriptors. */
517/* Note that using only 32 bit fields simplifies conversion to big-endian
518 architectures. */
struct netdev_desc {
	u32 next_desc;		/* link to the next descriptor (driver forms the ring) */
	s32 cmd_status;		/* command/status word, see enum desc_status_bits below */
	u32 addr;		/* DMA address of the packet buffer */
	u32 software_use;	/* scratch word; name suggests the nic ignores it — TODO confirm */
};
525
526/* Bits in network_desc.status */
enum desc_status_bits {
	/* Bits common to Rx and Tx descriptors. */
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	/* Tx-only status bits. */
	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	/* Rx-only status bits. */
	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};
543
/*
 * Per-device driver state, reached via netdev_priv(dev).
 * Most fields are guarded by np->lock (see "IIId. Synchronization" above).
 */
struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device_stats stats;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack. */
	unsigned int rx_buf_sz;
	int oom;
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* Don't pay attention to the reported link state. */
	int ignore_phy;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	/* parms saved in ethtool format */
	u16 speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
	u8 duplex;		/* Duplex, half or full */
	u8 autoneg;		/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;
	/* EEPROM data */
	int eeprom_size;
};
600
601static void move_int_phy(struct net_device *dev, int addr);
602static int eeprom_read(void __iomem *ioaddr, int location);
603static int mdio_read(struct net_device *dev, int reg);
604static void mdio_write(struct net_device *dev, int reg, u16 data);
605static void init_phy_fixup(struct net_device *dev);
606static int miiport_read(struct net_device *dev, int phy_id, int reg);
607static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
608static int find_mii(struct net_device *dev);
609static void natsemi_reset(struct net_device *dev);
610static void natsemi_reload_eeprom(struct net_device *dev);
611static void natsemi_stop_rxtx(struct net_device *dev);
612static int netdev_open(struct net_device *dev);
613static void do_cable_magic(struct net_device *dev);
614static void undo_cable_magic(struct net_device *dev);
615static void check_link(struct net_device *dev);
616static void netdev_timer(unsigned long data);
617static void dump_ring(struct net_device *dev);
618static void tx_timeout(struct net_device *dev);
619static int alloc_ring(struct net_device *dev);
620static void refill_rx(struct net_device *dev);
621static void init_ring(struct net_device *dev);
622static void drain_tx(struct net_device *dev);
623static void drain_ring(struct net_device *dev);
624static void free_ring(struct net_device *dev);
625static void reinit_ring(struct net_device *dev);
626static void init_registers(struct net_device *dev);
627static int start_tx(struct sk_buff *skb, struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100628static irqreturn_t intr_handler(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629static void netdev_error(struct net_device *dev, int intr_status);
Mark Brownb27a16b2006-02-02 00:00:01 +0000630static int natsemi_poll(struct net_device *dev, int *budget);
631static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632static void netdev_tx_done(struct net_device *dev);
633static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
634#ifdef CONFIG_NET_POLL_CONTROLLER
635static void natsemi_poll_controller(struct net_device *dev);
636#endif
637static void __set_rx_mode(struct net_device *dev);
638static void set_rx_mode(struct net_device *dev);
639static void __get_stats(struct net_device *dev);
640static struct net_device_stats *get_stats(struct net_device *dev);
641static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
642static int netdev_set_wol(struct net_device *dev, u32 newval);
643static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
644static int netdev_set_sopass(struct net_device *dev, u8 *newval);
645static int netdev_get_sopass(struct net_device *dev, u8 *data);
646static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
647static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
648static void enable_wol_mode(struct net_device *dev, int enable_intr);
649static int netdev_close(struct net_device *dev);
650static int netdev_get_regs(struct net_device *dev, u8 *buf);
651static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
Jeff Garzik7282d492006-09-13 14:30:00 -0400652static const struct ethtool_ops ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653
654static inline void __iomem *ns_ioaddr(struct net_device *dev)
655{
656 return (void __iomem *) dev->base_addr;
657}
658
Mark Brownb27a16b2006-02-02 00:00:01 +0000659static inline void natsemi_irq_enable(struct net_device *dev)
660{
661 writel(1, ns_ioaddr(dev) + IntrEnable);
662 readl(ns_ioaddr(dev) + IntrEnable);
663}
664
665static inline void natsemi_irq_disable(struct net_device *dev)
666{
667 writel(0, ns_ioaddr(dev) + IntrEnable);
668 readl(ns_ioaddr(dev) + IntrEnable);
669}
670
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671static void move_int_phy(struct net_device *dev, int addr)
672{
673 struct netdev_private *np = netdev_priv(dev);
674 void __iomem *ioaddr = ns_ioaddr(dev);
675 int target = 31;
676
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400677 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 * The internal phy is visible on the external mii bus. Therefore we must
679 * move it away before we can send commands to an external phy.
680 * There are two addresses we must avoid:
681 * - the address on the external phy that is used for transmission.
682 * - the address that we want to access. User space can access phys
683 * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independant from the
684 * phy that is used for transmission.
685 */
686
687 if (target == addr)
688 target--;
689 if (target == np->phy_addr_external)
690 target--;
691 writew(target, ioaddr + PhyCtrl);
692 readw(ioaddr + PhyCtrl);
693 udelay(1);
694}
695
/*
 * Capture the transceiver's power-on media settings (speed, duplex,
 * autonegotiation state and advertised modes) into the driver state and
 * set the initial carrier state.  Called once from natsemi_probe1().
 */
static void __devinit natsemi_init_media (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	/* When ignoring the PHY, report the link as always up. */
	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp            = mdio_read(dev, MII_BMCR);
	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
	np->advertising= mdio_read(dev, MII_ADVERTISE);

	/* Warn if the transceiver does not advertise all four 10/100 modes. */
	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
	 && netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);

}
734
/*
 * Probe one DP8381x board: enable the PCI device, map its registers,
 * read the MAC address out of the EEPROM, choose the initial media port
 * (internal vs. external phy), fill in the net_device methods and
 * register the interface.
 *
 * Returns 0 on success or a negative errno; on failure every acquired
 * resource is released through the goto-cleanup chain at the end.
 */
static int __devinit natsemi_probe1 (struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;	/* counts boards across probe calls */
	unsigned long iostart, iosize;
	void __iomem *ioaddr;
	const int pcibar = 1; /* PCI base address register */
	int prev_eedata;
	u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	/* natsemi has a non-standard PM control register
	 * in PCI config space.  Some boards apparently need
	 * to be brought to D0 in this manner.
	 */
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		/* D0 state, disable PME assertion */
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_ioremap;
	}

	/* Work around the dropped serial bit. */
	/* Each 16-bit EEPROM word (6..9) contributes two MAC address bytes,
	 * re-assembled across the one-bit shift. */
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		dev->dev_addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}

	dev->base_addr = (unsigned long __force) ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	np->ignore_phy = 0;

	/* Initial port:
	 * - If configured to ignore the PHY set up for external.
	 * - If the nic was configured to use an external phy and if find_mii
	 *   finds a phy: use external port, first phy that replies.
	 * - Otherwise: internal port.
	 * Note that the phy address for the internal phy doesn't matter:
	 * The address would be used to access a phy over the mii bus, but
	 * the internal phy is accessed through mapped registers.
	 */
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;
	/* Reset the chip to erase previous misconfiguration. */
	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);
		/* If we're ignoring the PHY it doesn't matter if we can't
		 * find one. */
		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			/* Fall back to the internal transceiver. */
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option) {
		/* NOTE(review): 0x200 is bit 9, but the MODULE_PARM_DESC for
		 * "options" says "bit 17: full duplex" — confirm which is
		 * intended. */
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
		np->full_duplex = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->change_mtu = &natsemi_change_mtu;
	dev->do_ioctl = &netdev_ioctl;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->poll = natsemi_poll;
	dev->weight = 64;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = &natsemi_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ethtool_ops);

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	/* save the silicon revision for later querying */
	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
				pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ",
			dev->name, natsemi_pci_info[chip_idx].name, iostart,
			pci_name(np->pci_dev));
		for (i = 0; i < ETH_ALEN-1; i++)
			printk("%02x:", dev->dev_addr[i]);
		printk("%02x, IRQ %d", dev->dev_addr[i], irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_register_netdev:
	iounmap(ioaddr);

 err_ioremap:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}
921
922
923/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
924 The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
925
/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
/* A dummy PCI read serves as the inter-transition delay. */
#define eeprom_delay(ee_addr) readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* 93c06/46 opcodes; the commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
942
/* Read one 16-bit word from the 93c06/46 serial EEPROM.
 * @addr: mapped base of the chip's register window.
 * @location: 6-bit word address within the EEPROM.
 * Returns the 16-bit word; bits arrive LSB-first into retval.
 *
 * Bit-bangs the EEPROM protocol through the EECtrl register.  The
 * statement order (data setup, delay, clock edge, delay) implements the
 * part's timing - do not reorder.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	/* assert chip select with data low */
	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out, MSB first. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* clock the 16 data bits in */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
976
977/* MII transceiver control section.
978 * The 83815 series has an internal transceiver, and we present the
979 * internal management registers as if they were MII connected.
980 * External Phy registers are referenced through the MII interface.
981 */
982
/* clock transitions >= 20ns (25MHz)
 * One readl should be good to PCI @ 100MHz
 * (a dummy PCI read of EECtrl is used as the delay primitive)
 */
#define mii_delay(ioaddr) readl(ioaddr + EECtrl)
987
/* Clock one bit in from the bit-banged MII management interface.
 * Returns 1 if MII_Data was high while the clock was asserted, else 0.
 * The sample must happen between the rising and falling clock writes;
 * do not reorder these register accesses.
 */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);	/* raise the clock */
	data = readl(ioaddr + EECtrl);		/* sample while clock high */
	writel(0, ioaddr + EECtrl);		/* drop the clock */
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}
999
/* Shift the low @len bits of @data out on the MII management bus,
 * MSB first, pulsing MII_ShiftClk once per bit.  Leaves the control
 * register cleared (bus idle) on return.
 */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);		/* data setup */
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl); /* clock it out */
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}
1016
/* Read a register from a PHY on the external MII management bus.
 * @phy_id: 5-bit PHY address; @reg: 5-bit register number.
 * Returns the 16-bit register value, or 0 if no PHY drove the
 * turnaround bit low (nothing answering at that address).
 */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync: 32 ones resynchronize the management interface */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround: a present PHY pulls the line low here */
	if (mii_getbit (dev))
		return 0;
	/* Read data, MSB first */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}
1041
/* Write a 16-bit value to PHY register @reg at address @phy_id using an
 * MII management write frame on the external bus.
 */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync: 32 ones resynchronize the management interface */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}
1055
1056static int mdio_read(struct net_device *dev, int reg)
1057{
1058 struct netdev_private *np = netdev_priv(dev);
1059 void __iomem *ioaddr = ns_ioaddr(dev);
1060
1061 /* The 83815 series has two ports:
1062 * - an internal transceiver
1063 * - an external mii bus
1064 */
1065 if (dev->if_port == PORT_TP)
1066 return readw(ioaddr+BasicControl+(reg<<2));
1067 else
1068 return miiport_read(dev, np->phy_addr_external, reg);
1069}
1070
1071static void mdio_write(struct net_device *dev, int reg, u16 data)
1072{
1073 struct netdev_private *np = netdev_priv(dev);
1074 void __iomem *ioaddr = ns_ioaddr(dev);
1075
1076 /* The 83815 series has an internal transceiver; handle separately */
1077 if (dev->if_port == PORT_TP)
1078 writew(data, ioaddr+BasicControl+(reg<<2));
1079 else
1080 miiport_write(dev, np->phy_addr_external, reg, data);
1081}
1082
/* (Re)program transceiver settings that are lost when the PHY loses
 * power: autoneg/speed/duplex from np->{autoneg,speed,duplex,advertising},
 * vendor-specific setup for known external PHYs, and - internal
 * transceiver only - the National-recommended DSP register values.
 * Finally enables PHY event interrupts (link change, autoneg complete).
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0
		 || np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	/* everything below applies only to the internal transceiver */
	if (cfg & CfgExtPhy)
		return;

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence. These settings optimize some
	   of the 100Mbit autodetection circuitry. They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them. If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read back and verify the value stuck */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts. Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
1195
/* Select the external MII transceiver.
 * Returns 0 if the external phy was already active (nothing done),
 * 1 if a switch was actually performed.
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power. We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
1228
/* Select the internal transceiver, resetting it afterwards.
 * Returns 0 if the internal phy was already active (nothing done),
 * 1 if a switch (plus phy reset and fixup reinit) was performed.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* wait until the phy clears BMCR_RESET */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
1272
/* Scan for a PHY on the external mii bus.
 * There are two tricky points:
 * - Do not scan while the internal phy is enabled. The internal phy will
 * crash: e.g. reads from the DSPCFG register will return odd values and
 * the nasty random phy reset code will reset the nic every few seconds.
 * - The internal phy must be moved around, an external phy could
 * have the same address as the internal phy.
 *
 * Returns the lowest address (1-31) that answered, or 32 if the whole
 * range stayed silent (the probe code treats that as PHY_ADDR_NONE).
 * Side effect: np->mii is set to the responder's PHY ID.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		/* all-ones/all-zeroes BMSR means nothing answered */
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
1316
/* Register bits that natsemi_reset() preserves across a chip reset;
 * they mirror what an EEPROM autoload would restore (see the spec's
 * EEPROM map).
 */
/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
/* NOTE: used to carry a stray trailing ';' which would have broken any
 * use of the macro inside a larger expression - removed. */
#define RFCR_RESET_SAVE 0xf8500000
1323
/* Soft-reset the chip while preserving the registers that a power-up
 * EEPROM autoload would normally set (CFG, WCSR, RFCR, the PMATCH
 * perfect-match filter and the SOPAS SecureOn words).  Caller is
 * expected to follow up with init_registers() eventually.
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map). This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH (read through the RxFilterAddr/Data indirection) */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* merge post-reset RFCR bits we did not save */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	/* restore SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1400
/* Reset only the receive engine and wait for RxResetDone.
 * IntrStatus bits observed while polling are accumulated into
 * np->intr_status so pending interrupt state is not lost (reading
 * IntrStatus clears it).  Does not restart RX.
 */
static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
		       dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		/* NOTE(review): success path logs at KERN_WARNING while the
		 * sibling reset/stop helpers use KERN_DEBUG - consider
		 * lowering the severity for consistency. */
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
		       dev->name, i*15);
	}
}
1425
/* Trigger a full configuration reload from the serial EEPROM and poll
 * until the chip clears EepromReload (or give up after
 * NATSEMI_HW_TIMEOUT * 50 usec).
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
1446
/* Command both DMA engines off and busy-wait until the chip reports
 * that the TX and RX processes have actually stopped.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
1467
/* net_device open() hook: reset the chip, grab the (shared) interrupt,
 * allocate and fill the descriptor rings, program the MAC address into
 * the perfect-match filter, start the queue and arm the housekeeping
 * timer.  Returns 0 or a negative errno (irq or ring alloc failure).
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, dev->irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(dev->irq, dev);
		return i;
	}
	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* pack two address bytes per 16-bit filter word */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer; /* timer handler */
	add_timer(&np->timer);

	return 0;
}
1516
1517static void do_cable_magic(struct net_device *dev)
1518{
1519 struct netdev_private *np = netdev_priv(dev);
1520 void __iomem *ioaddr = ns_ioaddr(dev);
1521
1522 if (dev->if_port != PORT_TP)
1523 return;
1524
1525 if (np->srr >= SRR_DP83816_A5)
1526 return;
1527
1528 /*
1529 * 100 MBit links with short cables can trip an issue with the chip.
1530 * The problem manifests as lots of CRC errors and/or flickering
1531 * activity LED while idle. This process is based on instructions
1532 * from engineers at National.
1533 */
1534 if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1535 u16 data;
1536
1537 writew(1, ioaddr + PGSEL);
1538 /*
1539 * coefficient visibility should already be enabled via
1540 * DSPCFG | 0x1000
1541 */
1542 data = readw(ioaddr + TSTDAT) & 0xff;
1543 /*
1544 * the value must be negative, and within certain values
1545 * (these values all come from National)
1546 */
1547 if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1548 struct netdev_private *np = netdev_priv(dev);
1549
1550 /* the bug has been triggered - fix the coefficient */
1551 writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1552 /* lock the value */
1553 data = readw(ioaddr + DSPCFG);
1554 np->dspcfg = data | DSPCFG_LOCK;
1555 writew(np->dspcfg, ioaddr + DSPCFG);
1556 }
1557 writew(0, ioaddr + PGSEL);
1558 }
1559}
1560
1561static void undo_cable_magic(struct net_device *dev)
1562{
1563 u16 data;
1564 struct netdev_private *np = netdev_priv(dev);
1565 void __iomem * ioaddr = ns_ioaddr(dev);
1566
1567 if (dev->if_port != PORT_TP)
1568 return;
1569
1570 if (np->srr >= SRR_DP83816_A5)
1571 return;
1572
1573 writew(1, ioaddr + PGSEL);
1574 /* make sure the lock bit is clear */
1575 data = readw(ioaddr + DSPCFG);
1576 np->dspcfg = data & ~DSPCFG_LOCK;
1577 writew(np->dspcfg, ioaddr + DSPCFG);
1578 writew(0, ioaddr + PGSEL);
1579}
1580
/* Re-read the PHY link state and propagate it: update the carrier flag,
 * run the cable-magic erratum handling on link transitions, and make
 * the chip's RX/TX duplex configuration match the (re)negotiated link.
 * When np->ignore_phy is set, skips all MII access and only propagates
 * the software duplex setting.  Called with np->lock held (see
 * netdev_timer/init_registers callers).
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	/* derive duplex: forced full, negotiated full, or BMCR says full */
	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
1646
/* Bring a freshly reset chip to operational state: load descriptor ring
 * pointers, set TX/RX DMA burst/threshold config, disable PME (WoL),
 * apply current link settings and RX filter mode, unmask interrupts and
 * start the RX/TX engines.  Called with np->lock held.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	/* the TX ring follows the RX ring in the same DMA allocation */
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
		TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	writel(1, ioaddr + IntrEnable);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
1712
/*
 * netdev_timer:
 * Purpose:
 * 1) check for link changes. Usually they are handled by the MII interrupt
 *    but it doesn't hurt to check twice.
 * 2) check for sudden death of the NIC:
 *    It seems that a reference set for this chip went out with incorrect info,
 *    and there exist boards that aren't quite right.  An unexpected voltage
 *    drop can cause the PHY to get itself into a weird state (basically reset).
 *    NOTE: this only seems to affect revC chips.
 * 3) check for death of the RX path due to OOM
 */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = 5*HZ;	/* default polling interval */

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/* drop the lock before disable_irq, then
				 * retake it for the full reinit */
				spin_unlock_irq(&np->lock);
				if (netif_msg_hw(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(dev->irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(dev->irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		/* retry the RX ring refill that failed earlier */
		disable_irq(dev->irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(dev->irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			/* still out of memory: poll again as soon as possible */
			next_tick = 1;
		}
	}
	mod_timer(&np->timer, jiffies + next_tick);
}
1790
1791static void dump_ring(struct net_device *dev)
1792{
1793 struct netdev_private *np = netdev_priv(dev);
1794
1795 if (netif_msg_pktdata(np)) {
1796 int i;
1797 printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
1798 for (i = 0; i < TX_RING_SIZE; i++) {
1799 printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1800 i, np->tx_ring[i].next_desc,
1801 np->tx_ring[i].cmd_status,
1802 np->tx_ring[i].addr);
1803 }
1804 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1805 for (i = 0; i < RX_RING_SIZE; i++) {
1806 printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1807 i, np->rx_ring[i].next_desc,
1808 np->rx_ring[i].cmd_status,
1809 np->rx_ring[i].addr);
1810 }
1811 }
1812}
1813
/* net_device tx_timeout() hook: unless another code path currently owns
 * the hardware (np->hands_off), dump state and fully reset the chip and
 * rings, then restart the queue.  Counts the event in stats.tx_errors.
 */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* keep the interrupt handler out while we reinitialize */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}
1844
1845static int alloc_ring(struct net_device *dev)
1846{
1847 struct netdev_private *np = netdev_priv(dev);
1848 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1849 sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1850 &np->ring_dma);
1851 if (!np->rx_ring)
1852 return -ENOMEM;
1853 np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1854 return 0;
1855}
1856
/* Allocate skbs for all empty RX ring slots and map them for DMA.
 * On allocation failure the remaining slots stay empty; if no buffer at
 * all could be provided (ring completely drained), set np->oom so
 * netdev_timer keeps retrying.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = dev_alloc_skb(buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			skb->dev = dev; /* Mark as being used by this device. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}
1884
1885static void set_bufsize(struct net_device *dev)
1886{
1887 struct netdev_private *np = netdev_priv(dev);
1888 if (dev->mtu <= ETH_DATA_LEN)
1889 np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1890 else
1891 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1892}
1893
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Both rings are circular: each descriptor's next_desc points at the
 * bus address of the following descriptor, wrapping at the end.  The
 * TX ring occupies the slots after the RX ring in the shared
 * allocation made by alloc_ring(). */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be carefull before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	/* Attach buffers, then (debug-only) dump the result. */
	refill_rx(dev);
	dump_ring(dev);
}
1932
1933static void drain_tx(struct net_device *dev)
1934{
1935 struct netdev_private *np = netdev_priv(dev);
1936 int i;
1937
1938 for (i = 0; i < TX_RING_SIZE; i++) {
1939 if (np->tx_skbuff[i]) {
1940 pci_unmap_single(np->pci_dev,
1941 np->tx_dma[i], np->tx_skbuff[i]->len,
1942 PCI_DMA_TODEVICE);
1943 dev_kfree_skb(np->tx_skbuff[i]);
1944 np->stats.tx_dropped++;
1945 }
1946 np->tx_skbuff[i] = NULL;
1947 }
1948}
1949
1950static void drain_rx(struct net_device *dev)
1951{
1952 struct netdev_private *np = netdev_priv(dev);
1953 unsigned int buflen = np->rx_buf_sz;
1954 int i;
1955
1956 /* Free all the skbuffs in the Rx queue. */
1957 for (i = 0; i < RX_RING_SIZE; i++) {
1958 np->rx_ring[i].cmd_status = 0;
1959 np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1960 if (np->rx_skbuff[i]) {
1961 pci_unmap_single(np->pci_dev,
1962 np->rx_dma[i], buflen,
1963 PCI_DMA_FROMDEVICE);
1964 dev_kfree_skb(np->rx_skbuff[i]);
1965 }
1966 np->rx_skbuff[i] = NULL;
1967 }
1968}
1969
/* Free the buffers attached to both rings (RX first, then TX).
 * Descriptor memory itself is released separately by free_ring(). */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
1975
1976static void free_ring(struct net_device *dev)
1977{
1978 struct netdev_private *np = netdev_priv(dev);
1979 pci_free_consistent(np->pci_dev,
1980 sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1981 np->rx_ring, np->ring_dma);
1982}
1983
1984static void reinit_rx(struct net_device *dev)
1985{
1986 struct netdev_private *np = netdev_priv(dev);
1987 int i;
1988
1989 /* RX Ring */
1990 np->dirty_rx = 0;
1991 np->cur_rx = RX_RING_SIZE;
1992 np->rx_head_desc = &np->rx_ring[0];
1993 /* Initialize all Rx descriptors. */
1994 for (i = 0; i < RX_RING_SIZE; i++)
1995 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
1996
1997 refill_rx(dev);
1998}
1999
2000static void reinit_ring(struct net_device *dev)
2001{
2002 struct netdev_private *np = netdev_priv(dev);
2003 int i;
2004
2005 /* drain TX ring */
2006 drain_tx(dev);
2007 np->dirty_tx = np->cur_tx = 0;
2008 for (i=0;i<TX_RING_SIZE;i++)
2009 np->tx_ring[i].cmd_status = 0;
2010
2011 reinit_rx(dev);
2012}
2013
/* Hard-start-xmit hook: map the skb for DMA, fill the next TX
 * descriptor, and kick the transmitter.  Stops the queue when the
 * ring is nearly full; drops the frame if the driver is in the
 * hands-off state (chip owned by another path).  Always returns 0
 * (frame consumed either way).
 */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irq(&np->lock);

	if (!np->hands_off) {
		/* Hand the descriptor to the chip (DescOwn) together
		 * with the frame length. */
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			/* Ring nearly full: reap finished frames, and
			 * stop the queue if that did not help. */
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* Chip is owned by another path; drop the frame. */
		dev_kfree_skb_irq(skb);
		np->stats.tx_dropped++;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
2061
/* Reap completed TX descriptors: walk from dirty_tx towards cur_tx,
 * stopping at the first descriptor still owned by the chip.  Updates
 * statistics, unmaps and frees each finished skb, and wakes the queue
 * once enough slots are free.  Caller must hold np->lock.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* Chip still owns this descriptor: nothing more done. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			np->stats.tx_packets++;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				np->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				np->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				np->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				np->stats.tx_window_errors++;
			np->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev)
		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}
2104
Mark Brownb27a16b2006-02-02 00:00:01 +00002105/* The interrupt handler doesn't actually handle interrupts itself, it
2106 * schedules a NAPI poll if there is anything to do. */
David Howells7d12e782006-10-05 14:55:46 +01002107static irqreturn_t intr_handler(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
2109 struct net_device *dev = dev_instance;
2110 struct netdev_private *np = netdev_priv(dev);
2111 void __iomem * ioaddr = ns_ioaddr(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 if (np->hands_off)
2114 return IRQ_NONE;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002115
Mark Brownb27a16b2006-02-02 00:00:01 +00002116 /* Reading automatically acknowledges. */
2117 np->intr_status = readl(ioaddr + IntrStatus);
2118
2119 if (netif_msg_intr(np))
2120 printk(KERN_DEBUG
2121 "%s: Interrupt, status %#08x, mask %#08x.\n",
2122 dev->name, np->intr_status,
2123 readl(ioaddr + IntrMask));
2124
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002125 if (!np->intr_status)
Mark Brownb27a16b2006-02-02 00:00:01 +00002126 return IRQ_NONE;
2127
2128 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2129
2130 if (netif_rx_schedule_prep(dev)) {
2131 /* Disable interrupts and register for poll */
2132 natsemi_irq_disable(dev);
2133 __netif_rx_schedule(dev);
2134 }
2135 return IRQ_HANDLED;
2136}
2137
/* This is the NAPI poll routine. As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
/* Old-style NAPI: *budget/dev->quota bound the RX work; returns 1 if
 * the budget was exhausted (poll again), 0 when done (interrupts
 * re-enabled).  Loops re-reading IntrStatus until it is clear. */
static int natsemi_poll(struct net_device *dev, int *budget)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	int work_to_do = min(*budget, dev->quota);
	int work_done = 0;

	do {
		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, work_to_do);
		}

		/* NOTE(review): work_done is cumulative, so on a
		 * second loop iteration this subtracts already-counted
		 * work from *budget/quota again — looks like potential
		 * double accounting; confirm against the NAPI
		 * contract. */
		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	netif_rx_complete(dev);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off && netif_running(dev))
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return 0;
}
2187
2188/* This routine is logically part of the interrupt handler, but separated
2189 for clarity and better register allocation. */
Mark Brownb27a16b2006-02-02 00:00:01 +00002190static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191{
2192 struct netdev_private *np = netdev_priv(dev);
2193 int entry = np->cur_rx % RX_RING_SIZE;
2194 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
2195 s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2196 unsigned int buflen = np->rx_buf_sz;
2197 void __iomem * ioaddr = ns_ioaddr(dev);
2198
2199 /* If the driver owns the next entry it's a new packet. Send it up. */
2200 while (desc_status < 0) { /* e.g. & DescOwn */
2201 int pkt_len;
2202 if (netif_msg_rx_status(np))
2203 printk(KERN_DEBUG
2204 " netdev_rx() entry %d status was %#08x.\n",
2205 entry, desc_status);
2206 if (--boguscnt < 0)
2207 break;
Mark Brownb27a16b2006-02-02 00:00:01 +00002208
2209 if (*work_done >= work_to_do)
2210 break;
2211
2212 (*work_done)++;
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 pkt_len = (desc_status & DescSizeMask) - 4;
2215 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2216 if (desc_status & DescMore) {
2217 if (netif_msg_rx_err(np))
2218 printk(KERN_WARNING
2219 "%s: Oversized(?) Ethernet "
2220 "frame spanned multiple "
2221 "buffers, entry %#08x "
2222 "status %#08x.\n", dev->name,
2223 np->cur_rx, desc_status);
2224 np->stats.rx_length_errors++;
Mark Browne72fd962006-02-02 00:00:02 +00002225
2226 /* The RX state machine has probably
2227 * locked up beneath us. Follow the
2228 * reset procedure documented in
2229 * AN-1287. */
2230
2231 spin_lock_irq(&np->lock);
2232 reset_rx(dev);
2233 reinit_rx(dev);
2234 writel(np->ring_dma, ioaddr + RxRingPtr);
2235 check_link(dev);
2236 spin_unlock_irq(&np->lock);
2237
2238 /* We'll enable RX on exit from this
2239 * function. */
2240 break;
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 } else {
2243 /* There was an error. */
2244 np->stats.rx_errors++;
2245 if (desc_status & (DescRxAbort|DescRxOver))
2246 np->stats.rx_over_errors++;
2247 if (desc_status & (DescRxLong|DescRxRunt))
2248 np->stats.rx_length_errors++;
2249 if (desc_status & (DescRxInvalid|DescRxAlign))
2250 np->stats.rx_frame_errors++;
2251 if (desc_status & DescRxCRC)
2252 np->stats.rx_crc_errors++;
2253 }
2254 } else if (pkt_len > np->rx_buf_sz) {
2255 /* if this is the tail of a double buffer
2256 * packet, we've already counted the error
2257 * on the first part. Ignore the second half.
2258 */
2259 } else {
2260 struct sk_buff *skb;
2261 /* Omit CRC size. */
2262 /* Check if the packet is long enough to accept
2263 * without copying to a minimally-sized skbuff. */
2264 if (pkt_len < rx_copybreak
2265 && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
2266 skb->dev = dev;
2267 /* 16 byte align the IP header */
2268 skb_reserve(skb, RX_OFFSET);
2269 pci_dma_sync_single_for_cpu(np->pci_dev,
2270 np->rx_dma[entry],
2271 buflen,
2272 PCI_DMA_FROMDEVICE);
2273 eth_copy_and_sum(skb,
David S. Miller689be432005-06-28 15:25:31 -07002274 np->rx_skbuff[entry]->data, pkt_len, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 skb_put(skb, pkt_len);
2276 pci_dma_sync_single_for_device(np->pci_dev,
2277 np->rx_dma[entry],
2278 buflen,
2279 PCI_DMA_FROMDEVICE);
2280 } else {
2281 pci_unmap_single(np->pci_dev, np->rx_dma[entry],
2282 buflen, PCI_DMA_FROMDEVICE);
2283 skb_put(skb = np->rx_skbuff[entry], pkt_len);
2284 np->rx_skbuff[entry] = NULL;
2285 }
2286 skb->protocol = eth_type_trans(skb, dev);
Mark Brownb27a16b2006-02-02 00:00:01 +00002287 netif_receive_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 dev->last_rx = jiffies;
2289 np->stats.rx_packets++;
2290 np->stats.rx_bytes += pkt_len;
2291 }
2292 entry = (++np->cur_rx) % RX_RING_SIZE;
2293 np->rx_head_desc = &np->rx_ring[entry];
2294 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2295 }
2296 refill_rx(dev);
2297
2298 /* Restart Rx engine if stopped. */
2299 if (np->oom)
2300 mod_timer(&np->timer, jiffies + 1);
2301 else
2302 writel(RxOn, ioaddr + ChipCmd);
2303}
2304
/* Handle the uncommon interrupt causes summarized by
 * IntrAbnormalSummary: link changes, statistics overflow, TX
 * underrun (adaptively raising the TX drain threshold), wake-on-LAN
 * events, RX status FIFO overruns, and PCI errors.  Called from the
 * poll loop; takes np->lock itself.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
		 && netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Bump the TX drain threshold one step, up to its
		 * limit, then reprogram TxConfig. */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		np->stats.rx_fifo_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		np->stats.tx_fifo_errors++;
		np->stats.rx_fifo_errors++;
	}
	spin_unlock(&np->lock);
}
2364
2365static void __get_stats(struct net_device *dev)
2366{
2367 void __iomem * ioaddr = ns_ioaddr(dev);
2368 struct netdev_private *np = netdev_priv(dev);
2369
2370 /* The chip only need report frame silently dropped. */
2371 np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2372 np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2373}
2374
2375static struct net_device_stats *get_stats(struct net_device *dev)
2376{
2377 struct netdev_private *np = netdev_priv(dev);
2378
2379 /* The chip only need report frame silently dropped. */
2380 spin_lock_irq(&np->lock);
2381 if (netif_running(dev) && !np->hands_off)
2382 __get_stats(dev);
2383 spin_unlock_irq(&np->lock);
2384
2385 return &np->stats;
2386}
2387
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (netconsole/kgdboe): invoke the interrupt handler with
 * the device IRQ masked off so it cannot re-enter. */
static void natsemi_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	intr_handler(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
2396
2397#define HASH_TABLE 0x200
2398static void __set_rx_mode(struct net_device *dev)
2399{
2400 void __iomem * ioaddr = ns_ioaddr(dev);
2401 struct netdev_private *np = netdev_priv(dev);
2402 u8 mc_filter[64]; /* Multicast hash filter */
2403 u32 rx_mode;
2404
2405 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 rx_mode = RxFilterEnable | AcceptBroadcast
2407 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2408 } else if ((dev->mc_count > multicast_filter_limit)
2409 || (dev->flags & IFF_ALLMULTI)) {
2410 rx_mode = RxFilterEnable | AcceptBroadcast
2411 | AcceptAllMulticast | AcceptMyPhys;
2412 } else {
2413 struct dev_mc_list *mclist;
2414 int i;
2415 memset(mc_filter, 0, sizeof(mc_filter));
2416 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2417 i++, mclist = mclist->next) {
2418 int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
2419 mc_filter[i/8] |= (1 << (i & 0x07));
2420 }
2421 rx_mode = RxFilterEnable | AcceptBroadcast
2422 | AcceptMulticast | AcceptMyPhys;
2423 for (i = 0; i < 64; i += 2) {
Herbert Xu760f86d2005-04-16 16:24:16 +10002424 writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2425 writel((mc_filter[i + 1] << 8) + mc_filter[i],
2426 ioaddr + RxFilterData);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 }
2428 }
2429 writel(rx_mode, ioaddr + RxFilterAddr);
2430 np->cur_rx_mode = rx_mode;
2431}
2432
2433static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2434{
2435 if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
2436 return -EINVAL;
2437
2438 dev->mtu = new_mtu;
2439
2440 /* synchronized against open : rtnl_lock() held by caller */
2441 if (netif_running(dev)) {
2442 struct netdev_private *np = netdev_priv(dev);
2443 void __iomem * ioaddr = ns_ioaddr(dev);
2444
2445 disable_irq(dev->irq);
2446 spin_lock(&np->lock);
2447 /* stop engines */
2448 natsemi_stop_rxtx(dev);
2449 /* drain rx queue */
2450 drain_rx(dev);
2451 /* change buffers */
2452 set_bufsize(dev);
2453 reinit_rx(dev);
2454 writel(np->ring_dma, ioaddr + RxRingPtr);
2455 /* restart engines */
2456 writel(RxOn | TxOn, ioaddr + ChipCmd);
2457 spin_unlock(&np->lock);
2458 enable_irq(dev->irq);
2459 }
2460 return 0;
2461}
2462
2463static void set_rx_mode(struct net_device *dev)
2464{
2465 struct netdev_private *np = netdev_priv(dev);
2466 spin_lock_irq(&np->lock);
2467 if (!np->hands_off)
2468 __set_rx_mode(dev);
2469 spin_unlock_irq(&np->lock);
2470}
2471
2472static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2473{
2474 struct netdev_private *np = netdev_priv(dev);
2475 strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
2476 strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
2477 strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
2478}
2479
/* ethtool GREGS length: size in bytes of the register dump produced
 * by netdev_get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}
2484
/* ethtool GEEPROM length: EEPROM size as detected at probe time
 * (stored per-device in np->eeprom_size). */
static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}
2490
2491static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2492{
2493 struct netdev_private *np = netdev_priv(dev);
2494 spin_lock_irq(&np->lock);
2495 netdev_get_ecmd(dev, ecmd);
2496 spin_unlock_irq(&np->lock);
2497 return 0;
2498}
2499
2500static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2501{
2502 struct netdev_private *np = netdev_priv(dev);
2503 int res;
2504 spin_lock_irq(&np->lock);
2505 res = netdev_set_ecmd(dev, ecmd);
2506 spin_unlock_irq(&np->lock);
2507 return res;
2508}
2509
2510static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2511{
2512 struct netdev_private *np = netdev_priv(dev);
2513 spin_lock_irq(&np->lock);
2514 netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2515 netdev_get_sopass(dev, wol->sopass);
2516 spin_unlock_irq(&np->lock);
2517}
2518
2519static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2520{
2521 struct netdev_private *np = netdev_priv(dev);
2522 int res;
2523 spin_lock_irq(&np->lock);
2524 netdev_set_wol(dev, wol->wolopts);
2525 res = netdev_set_sopass(dev, wol->sopass);
2526 spin_unlock_irq(&np->lock);
2527 return res;
2528}
2529
2530static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2531{
2532 struct netdev_private *np = netdev_priv(dev);
2533 regs->version = NATSEMI_REGS_VER;
2534 spin_lock_irq(&np->lock);
2535 netdev_get_regs(dev, buf);
2536 spin_unlock_irq(&np->lock);
2537}
2538
2539static u32 get_msglevel(struct net_device *dev)
2540{
2541 struct netdev_private *np = netdev_priv(dev);
2542 return np->msg_enable;
2543}
2544
2545static void set_msglevel(struct net_device *dev, u32 val)
2546{
2547 struct netdev_private *np = netdev_priv(dev);
2548 np->msg_enable = val;
2549}
2550
2551static int nway_reset(struct net_device *dev)
2552{
2553 int tmp;
2554 int r = -EINVAL;
2555 /* if autoneg is off, it's an error */
2556 tmp = mdio_read(dev, MII_BMCR);
2557 if (tmp & BMCR_ANENABLE) {
2558 tmp |= (BMCR_ANRESTART);
2559 mdio_write(dev, MII_BMCR, tmp);
2560 r = 0;
2561 }
2562 return r;
2563}
2564
2565static u32 get_link(struct net_device *dev)
2566{
2567 /* LSTATUS is latched low until a read - so read twice */
2568 mdio_read(dev, MII_BMSR);
2569 return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2570}
2571
2572static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2573{
2574 struct netdev_private *np = netdev_priv(dev);
Mark Browna8b4cf42006-03-28 14:08:55 -08002575 u8 *eebuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 int res;
2577
Mark Browna8b4cf42006-03-28 14:08:55 -08002578 eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
2579 if (!eebuf)
2580 return -ENOMEM;
2581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
2583 spin_lock_irq(&np->lock);
2584 res = netdev_get_eeprom(dev, eebuf);
2585 spin_unlock_irq(&np->lock);
2586 if (!res)
2587 memcpy(data, eebuf+eeprom->offset, eeprom->len);
Mark Browna8b4cf42006-03-28 14:08:55 -08002588 kfree(eebuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 return res;
2590}
2591
Jeff Garzik7282d492006-09-13 14:30:00 -04002592static const struct ethtool_ops ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 .get_drvinfo = get_drvinfo,
2594 .get_regs_len = get_regs_len,
2595 .get_eeprom_len = get_eeprom_len,
2596 .get_settings = get_settings,
2597 .set_settings = set_settings,
2598 .get_wol = get_wol,
2599 .set_wol = set_wol,
2600 .get_regs = get_regs,
2601 .get_msglevel = get_msglevel,
2602 .set_msglevel = set_msglevel,
2603 .nway_reset = nway_reset,
2604 .get_link = get_link,
2605 .get_eeprom = get_eeprom,
2606};
2607
2608static int netdev_set_wol(struct net_device *dev, u32 newval)
2609{
2610 struct netdev_private *np = netdev_priv(dev);
2611 void __iomem * ioaddr = ns_ioaddr(dev);
2612 u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2613
2614 /* translate to bitmasks this chip understands */
2615 if (newval & WAKE_PHY)
2616 data |= WakePhy;
2617 if (newval & WAKE_UCAST)
2618 data |= WakeUnicast;
2619 if (newval & WAKE_MCAST)
2620 data |= WakeMulticast;
2621 if (newval & WAKE_BCAST)
2622 data |= WakeBroadcast;
2623 if (newval & WAKE_ARP)
2624 data |= WakeArp;
2625 if (newval & WAKE_MAGIC)
2626 data |= WakeMagic;
2627 if (np->srr >= SRR_DP83815_D) {
2628 if (newval & WAKE_MAGICSECURE) {
2629 data |= WakeMagicSecure;
2630 }
2631 }
2632
2633 writel(data, ioaddr + WOLCmd);
2634
2635 return 0;
2636}
2637
2638static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2639{
2640 struct netdev_private *np = netdev_priv(dev);
2641 void __iomem * ioaddr = ns_ioaddr(dev);
2642 u32 regval = readl(ioaddr + WOLCmd);
2643
2644 *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2645 | WAKE_ARP | WAKE_MAGIC);
2646
2647 if (np->srr >= SRR_DP83815_D) {
2648 /* SOPASS works on revD and higher */
2649 *supported |= WAKE_MAGICSECURE;
2650 }
2651 *cur = 0;
2652
2653 /* translate from chip bitmasks */
2654 if (regval & WakePhy)
2655 *cur |= WAKE_PHY;
2656 if (regval & WakeUnicast)
2657 *cur |= WAKE_UCAST;
2658 if (regval & WakeMulticast)
2659 *cur |= WAKE_MCAST;
2660 if (regval & WakeBroadcast)
2661 *cur |= WAKE_BCAST;
2662 if (regval & WakeArp)
2663 *cur |= WAKE_ARP;
2664 if (regval & WakeMagic)
2665 *cur |= WAKE_MAGIC;
2666 if (regval & WakeMagicSecure) {
2667 /* this can be on in revC, but it's broken */
2668 *cur |= WAKE_MAGICSECURE;
2669 }
2670
2671 return 0;
2672}
2673
2674static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2675{
2676 struct netdev_private *np = netdev_priv(dev);
2677 void __iomem * ioaddr = ns_ioaddr(dev);
2678 u16 *sval = (u16 *)newval;
2679 u32 addr;
2680
2681 if (np->srr < SRR_DP83815_D) {
2682 return 0;
2683 }
2684
2685 /* enable writing to these registers by disabling the RX filter */
2686 addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2687 addr &= ~RxFilterEnable;
2688 writel(addr, ioaddr + RxFilterAddr);
2689
2690 /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
2691 writel(addr | 0xa, ioaddr + RxFilterAddr);
2692 writew(sval[0], ioaddr + RxFilterData);
2693
2694 writel(addr | 0xc, ioaddr + RxFilterAddr);
2695 writew(sval[1], ioaddr + RxFilterData);
2696
2697 writel(addr | 0xe, ioaddr + RxFilterAddr);
2698 writew(sval[2], ioaddr + RxFilterData);
2699
2700 /* re-enable the RX filter */
2701 writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
2702
2703 return 0;
2704}
2705
2706static int netdev_get_sopass(struct net_device *dev, u8 *data)
2707{
2708 struct netdev_private *np = netdev_priv(dev);
2709 void __iomem * ioaddr = ns_ioaddr(dev);
2710 u16 *sval = (u16 *)data;
2711 u32 addr;
2712
2713 if (np->srr < SRR_DP83815_D) {
2714 sval[0] = sval[1] = sval[2] = 0;
2715 return 0;
2716 }
2717
2718 /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
2719 addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2720
2721 writel(addr | 0xa, ioaddr + RxFilterAddr);
2722 sval[0] = readw(ioaddr + RxFilterData);
2723
2724 writel(addr | 0xc, ioaddr + RxFilterAddr);
2725 sval[1] = readw(ioaddr + RxFilterData);
2726
2727 writel(addr | 0xe, ioaddr + RxFilterAddr);
2728 sval[2] = readw(ioaddr + RxFilterData);
2729
2730 writel(addr, ioaddr + RxFilterAddr);
2731
2732 return 0;
2733}
2734
/* Fill an ethtool_cmd from the driver's cached link parameters and,
 * when autonegotiation is on, from the live MII negotiation result.
 * Caller holds np->lock (see get_settings()).  Always returns 0.
 */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	ecmd->port = dev->if_port;
	ecmd->speed = np->speed;
	ecmd->duplex = np->duplex;
	ecmd->autoneg = np->autoneg;
	ecmd->advertising = 0;
	/* Translate the cached MII advertisement register into
	 * ethtool ADVERTISED_* bits. */
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy...
	 */

	/* set information based on active port type */
	switch (ecmd->port) {
	default:
	case PORT_TP:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		break;
	case PORT_MII:
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case PORT_FIBRE:
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	}

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		/* Intersect our advertisement with the partner's
		 * abilities to find the negotiated mode. */
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->speed = SPEED_100;
		else
			ecmd->speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	}

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}
2812
/* Apply an ethtool_cmd: validate the request, store the new
 * parameters, switch between the internal and external PHY as
 * needed, reprogram the PHY, and re-evaluate the link.  Caller holds
 * np->lock (see set_settings()).  Returns 0 or -EINVAL.
 */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg needs at least one mode to advertise. */
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transciever are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
			       ecmd->port == PORT_TP))
		return -EINVAL;

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific updates.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port = ecmd->port;
	np->autoneg = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed  = ecmd->speed;
		np->duplex = ecmd->duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}
2895
/*
 * Dump the chip registers for ethtool into buf, in this order:
 * page 0 registers, the current MII registers, the 'magic' page 1
 * registers, and the RFCR-indexed receive filter registers.
 */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	/* back to page 0 so normal operation is undisturbed */
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);	/* save filter address to restore later */
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	/* NOTE(review): rbuf[4]/rbuf[5] presumably hold IntrStatus/IntrMask
	 * from the page-0 dump above - confirm against the register map. */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}
2938
/*
 * Reverse the order of the 16 bits in x (bit 0 <-> bit 15, etc.).
 * Used to undo the "sane" bit ordering that eeprom_read() produces and
 * present the EEPROM contents as they are physically stored.
 * Note: evaluates x 16 times - pass only side-effect-free expressions.
 */
#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
2947
2948static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
2949{
2950 int i;
2951 u16 *ebuf = (u16 *)buf;
2952 void __iomem * ioaddr = ns_ioaddr(dev);
Mark Browna8b4cf42006-03-28 14:08:55 -08002953 struct netdev_private *np = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
2955 /* eeprom_read reads 16 bits, and indexes by 16 bits */
Mark Browna8b4cf42006-03-28 14:08:55 -08002956 for (i = 0; i < np->eeprom_size/2; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 ebuf[i] = eeprom_read(ioaddr, i);
2958 /* The EEPROM itself stores data bit-swapped, but eeprom_read
2959 * reads it back "sanely". So we swap it back here in order to
2960 * present it to userland as it is stored. */
2961 ebuf[i] = SWAP_BITS(ebuf[i]);
2962 }
2963 return 0;
2964}
2965
/*
 * MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG plus the old
 * SIOCDEVPRIVATE binary-compat equivalents). Because the phy_id alone
 * does not say which mii bus the target phy sits on, accesses are
 * routed according to the currently selected port (dev->if_port).
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = np->phy_addr_external;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			/* internal port: only the external phy address answers */
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* keep the cached advertising value in sync */
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* keep the cached advertising value in sync */
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
3022
/*
 * Arm the chip for wake-on-lan. The rx process must already be
 * stopped by the caller; it is restarted here in silent mode with a
 * NULL ring pointer so frames are pattern-matched but not received.
 * @enable_intr: if nonzero, also unmask WOLPkt/LinkChange interrupts
 * so wake events reach the interrupt handler.
 */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		writel(1, ioaddr + IntrEnable);
	}
}
3055
/*
 * Close the interface: quiesce all async paths (irq, timer, tx queue),
 * stop the chip, harvest statistics, tear down the rings and either
 * re-arm wake-on-lan or restore the saved PME state.
 * Called with rtnl_lock held.
 */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	/* disable_irq() synchronizes with the interrupt handler so that
	 * hands_off is seen consistently */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	free_irq(dev->irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	/* dummy reads to flush/clear pending interrupt state */
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
3128
3129
/*
 * PCI removal callback: unregister the netdev first so no new users
 * can appear, then release the BAR, unmap the registers and free the
 * device. Order matters - the mapping must outlive the netdev users.
 */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	unregister_netdev (dev);
	pci_release_regions (pdev);
	iounmap(ioaddr);
	free_netdev (dev);
	pci_set_drvdata(pdev, NULL);
}
3141
3142#ifdef CONFIG_PM
3143
3144/*
3145 * The ns83815 chip doesn't have explicit RxStop bits.
3146 * Kicking the Rx or Tx process for a new packet reenables the Rx process
3147 * of the nic, thus this function must be very careful:
3148 *
3149 * suspend/resume synchronization:
3150 * entry points:
3151 * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3152 * start_tx, tx_timeout
3153 *
3154 * No function accesses the hardware without checking np->hands_off.
3155 * the check occurs under spin_lock_irq(&np->lock);
3156 * exceptions:
3157 * * netdev_ioctl: noncritical access.
3158 * * netdev_open: cannot happen due to the device_detach
3159 * * netdev_close: doesn't hurt.
3160 * * netdev_timer: timer stopped by natsemi_suspend.
3161 * * intr_handler: doesn't acquire the spinlock. suspend calls
3162 * disable_irq() to enforce synchronization.
Mark Brownb27a16b2006-02-02 00:00:01 +00003163 * * natsemi_poll: checks before reenabling interrupts. suspend
3164 * sets hands_off, disables interrupts and then waits with
3165 * netif_poll_disable().
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 *
3167 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3168 */
3169
/*
 * PM suspend callback. See the synchronization notes in the comment
 * block above: all async entry points are fenced off via hands_off
 * under the spinlock with the irq disabled, then the rings are
 * drained and the chip is left either in WOL mode or powered down.
 */
static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		disable_irq(dev->irq);
		spin_lock_irq(&np->lock);

		writel(0, ioaddr + IntrEnable);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(dev->irq);

		/* wait for any in-flight poll to finish before draining */
		netif_poll_disable(dev);

		/* Update the error counts. */
		__get_stats(dev);

		/* pci_power_off(pdev, -1); */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
			/* Restore PME enable bit */
			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
3217
3218
3219static int natsemi_resume (struct pci_dev *pdev)
3220{
3221 struct net_device *dev = pci_get_drvdata (pdev);
3222 struct netdev_private *np = netdev_priv(dev);
3223
3224 rtnl_lock();
3225 if (netif_device_present(dev))
3226 goto out;
3227 if (netif_running(dev)) {
3228 BUG_ON(!np->hands_off);
3229 pci_enable_device(pdev);
3230 /* pci_power_on(pdev); */
3231
3232 natsemi_reset(dev);
3233 init_ring(dev);
3234 disable_irq(dev->irq);
3235 spin_lock_irq(&np->lock);
3236 np->hands_off = 0;
3237 init_registers(dev);
3238 netif_device_attach(dev);
3239 spin_unlock_irq(&np->lock);
3240 enable_irq(dev->irq);
3241
3242 mod_timer(&np->timer, jiffies + 1*HZ);
3243 }
3244 netif_device_attach(dev);
Mark Brownb27a16b2006-02-02 00:00:01 +00003245 netif_poll_enable(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246out:
3247 rtnl_unlock();
3248 return 0;
3249}
3250
3251#endif /* CONFIG_PM */
3252
/* PCI driver glue: probe/remove and (optionally) power management hooks. */
static struct pci_driver natsemi_driver = {
	.name		= DRV_NAME,
	.id_table	= natsemi_pci_tbl,
	.probe		= natsemi_probe1,
	.remove		= __devexit_p(natsemi_remove1),
#ifdef CONFIG_PM
	.suspend	= natsemi_suspend,
	.resume		= natsemi_resume,
#endif
};
3263
/* Module entry point: announce the driver and register with the PCI core. */
static int __init natsemi_init_mod (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	/* version is a driver-internal constant string, so the non-literal
	 * format argument is safe here (common kernel idiom of the era) */
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}
3273
/* Module exit point: deregister from the PCI core (triggers remove). */
static void __exit natsemi_exit_mod (void)
{
	pci_unregister_driver (&natsemi_driver);
}
3278
/* register module load/unload handlers */
module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);
3281