/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
	Written 1996-1999 by Donald Becker.

	The driver also contains updates by different kernel developers
	(see incomplete list below).
	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
	Please use this email address and linux-kernel mailing list for bug reports.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
	It should work with all i82557/558/559 boards.

	Version history:
	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
		Serious fixes for multicast filter list setting, TX timeout routine;
		RX ring refilling logic; other stuff
	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
		Convert to new PCI driver interface
	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
	2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
		rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char * const version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";

/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;	/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;

#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
	defined(__arm__)
  /* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)		skb_reserve((skb), 2)
# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	64
#define RX_RING_SIZE	64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis marking queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
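
/* With the defaults above this works out to TX_MULTICAST_RESERV = 4,
   TX_QUEUE_LIMIT = 60 and TX_QUEUE_UNFULL = 56 of the 64 ring slots. */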

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ		1536

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>

static int use_io;
static int debug = -1;
#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
				 NETIF_MSG_HW		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)


MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
module_param(use_io, int, 0);
module_param(debug, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(congenb, int, 0);
module_param(txfifo, int, 0);
module_param(rxfifo, int, 0);
module_param(txdmacount, int, 0);
module_param(rxdmacount, int, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
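
/* A minimal usage sketch (hypothetical values, not a recommendation):
   forcing the second board to 100 Mbps full duplex while leaving the
   first autonegotiating might look like

	modprobe eepro100 options=-1,0x30 full_duplex=-1,1 debug=3

   where debug=3 selects the three lowest NETIF_MSG_* bits through the
   DEBUG macro above, since (1<<3)-1 == 0x7. */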

#define RUN_AT(x) (jiffies + (x))

#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
	do { \
		(dev)->tx_timeout = (tf); \
		(dev)->watchdog_timeo = (tm); \
	} while(0)



/*
		Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD.  The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBD).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  While the setup frames may have the NoOp command on the
Tx ring marked as complete before the setup command itself has completed, this
is not a problem.  The tx_ring entry can still be safely reused, as the
tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.

Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
intervening delay.  Thus the resume command is always within the
interrupts-disabled region.  This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!

*/
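
/* The CmdSuspend/CU_RESUME dance described above, reduced to a sketch
   (the real sequence lives in speedo_start_xmit() below; this is an
   illustration, not a second implementation):

	spin_lock_irqsave(&sp->lock, flags);
	...queue tx_ring[entry], flagged CmdSuspend...
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);		// erase previous suspend
	iowrite8(CUResume, ioaddr + SCBCmd);	// resume with no gap
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
	spin_unlock_irqrestore(&sp->lock, flags);

   Keeping clear_suspend() and the CUResume write adjacent inside the
   locked region is what closes the race sketched in the note above. */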

static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);

/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBIntmask = 3,
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands. */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)	clear_bit(30, &(cmd)->cmd_status);
#else
# if defined(__LITTLE_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
# elif defined(__BIG_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
# else
#  error Unsupported byteorder
# endif
#endif
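
/* Why the halfword trick above works (a sketch of the reasoning, not a
   datasheet quote): cmd_status is a little-endian 32-bit word in which
   CmdSuspend is bit 30.  That bit lives in the byte at offset 3, as bit 6
   (0x40) of that byte.  ((__u16 *)&cmd_status)[1] covers bytes 2-3, so a
   little-endian CPU sees the bit at position 14 (mask 0x4000) while a
   big-endian CPU sees it at position 6 (mask 0x0040).  The Alpha build
   uses atomic clear_bit() instead, avoiding a read-modify-write race with
   the chip updating the adjacent status bits. */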

enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero). */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};

/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			/* A generic descriptor. */
	volatile s32 cmd_status;	/* All command and status fields. */
	u32 link;			/* struct descriptor * */
	unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {				/* Receive frame descriptor. */
	volatile s32 status;
	u32 link;			/* struct RxFD * */
	u32 rx_buf_addr;		/* void * */
	u32 count;
} RxFD_ALIGNMENT;
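
/* How an RxFD rides at the head of each Rx skbuff, as sketched in the
   theory section (this mirrors what speedo_init_rx_ring() does below and
   is for orientation only):

	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	rx_align(skb);				// 2-byte shift so IP headers align
	rxf = (struct RxFD *)skb->data;		// descriptor occupies the head
	skb_reserve(skb, sizeof(struct RxFD));	// packet data follows it
 */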

/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};

#define CONFIG_DATA_SIZE 22
struct TxFD {				/* Transmit frame descriptor set. */
	s32 status;
	u32 link;			/* void * */
	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
	u32 tx_buf_addr0;		/* void *, frame to be transmitted. */
	s32 tx_buf_size0;		/* Length of Tx frame. */
	u32 tx_buf_addr1;		/* void *, frame to be transmitted. */
	s32 tx_buf_size1;		/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};

/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;
	unsigned int tx;
	dma_addr_t frame_dma;
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};

/* Elements of the dump_statistics block.  This block must be lword aligned. */
struct speedo_stats {
	u32 tx_good_frames;
	u32 tx_coll16_errs;
	u32 tx_late_colls;
	u32 tx_underruns;
	u32 tx_lost_carrier;
	u32 tx_deferred;
	u32 tx_one_colls;
	u32 tx_multi_colls;
	u32 tx_total_colls;
	u32 rx_good_frames;
	u32 rx_crc_errs;
	u32 rx_align_errs;
	u32 rx_resource_errs;
	u32 rx_overrun_errs;
	u32 rx_colls_errs;
	u32 rx_runt_errs;
	u32 done_marker;
};

enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	void __iomem *regs;
	struct TxFD *tx_ring;			/* Commands (usually CmdTxPacket). */
	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
	/* The addresses of Tx/Rx-in-place packets/buffers. */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
	dma_addr_t rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;		/* Last command sent. */
	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
	spinlock_t lock;			/* Group with Tx control cache line. */
	u32 tx_threshold;			/* The value for txdesc.count. */
	struct RxFD *last_rxf;			/* Last filled RX buffer. */
	dma_addr_t last_rxf_dma;
	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats *lstats;
	dma_addr_t lstats_dma;
	int chip_id;
	struct pci_dev *pdev;
	struct timer_list timer;		/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long in_interrupt;			/* Word-aligned dev->interrupt */
	unsigned char acpi_pwr;
	signed char rx_mode;			/* Current PROMISC/ALLMULTI setting. */
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int flow_ctrl:1;		/* Use 802.3x flow control. */
	unsigned int rx_bug:1;			/* Work around receiver hang errata. */
	unsigned char default_port:8;		/* Last dev->if_port value. */
	unsigned char rx_ring_state;		/* RX ring status flags. */
	unsigned short phy[2];			/* PHY media interfaces available. */
	unsigned short partner;			/* Link partner caps. */
	struct mii_if_info mii_if;		/* MII API hooks, info */
	u32 msg_enable;				/* debug message level */
};

/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80,	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
	0x31, 0x05, };

/* PHY media interface chips. */
static const char * const phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
		 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)

static int eepro100_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent);

static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif

/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev,
					      struct speedo_private *sp)
{
	int wait = 1000;
	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
	unsigned char r;

	do {
		udelay(1);
		r = ioread8(cmd_ioaddr);
	} while(r && --wait >= 0);

	if (wait < 0)
		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}

static int __devinit eepro100_init_one (struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int irq, pci_bar;
	int acpi_idle_state = 0, pm;
	static int cards_found /* = 0 */;
	unsigned long pci_base;

#ifndef MODULE
	/* when built-in, we only print version if device is found */
	static int did_version;
	if (did_version++ == 0)
		printk(version);
#endif

	/* save power state before pci_enable_device overwrites it */
	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm) {
		u16 pwr_command;
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}

	/* No regions are requested yet, so bail straight out on failure
	   rather than releasing resources we never reserved. */
	if (pci_enable_device(pdev))
		goto err_out_none;

	pci_set_master(pdev);

	if (!request_region(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1), "eepro100")) {
		dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
	if (!request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), "eepro100")) {
		dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}

	irq = pdev->irq;
	pci_bar = use_io ? 1 : 0;
	pci_base = pci_resource_start(pdev, pci_bar);
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
		       pci_base, irq);

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
		goto err_out_free_mmio_region;
	}

	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
		cards_found++;
	else
		goto err_out_iounmap;

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
	return -ENODEV;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */

static void poll_speedo (struct net_device *dev)
{
	/* disable_irq is not very nice, but with the funny lockless design
	   we have no other choice. */
	disable_irq(dev->irq);
	speedo_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int __devinit speedo_found1(struct pci_dev *pdev,
		void __iomem *ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
	struct speedo_private *sp;
	const char *product;
	int i, option;
	u16 eeprom[0x100];
	int size;
	void *tx_ring_space;
	dma_addr_t tx_ring_dma;
	DECLARE_MAC_BUF(mac);

	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
	if (tx_ring_space == NULL)
		return -1;

	dev = alloc_etherdev(sizeof(struct speedo_private));
	if (dev == NULL) {
		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dev->mem_start > 0)
		option = dev->mem_start;
	else if (card_idx >= 0  &&  options[card_idx] >= 0)
		option = options[card_idx];
	else
		option = 0;

	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto err_free_unlock;

	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	*/
	{
		void __iomem *iobase;
		int read_cmd, ee_size;
		u16 sum;
		int j;

		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
		   requirements. */
		iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
		if (!iobase)
			goto err_free_unlock;
		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
			== 0xffe0000) {
			ee_size = 0x100;
			read_cmd = EE_READ_CMD << 24;
		} else {
			ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}

		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
			eeprom[i] = value;
			sum += value;
			if (i < 3) {
				dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
		if (sum != 0xBABA)
			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
				   "check settings before activating this device!\n",
				   dev->name, sum);
		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
		   usable, especially if the MAC address is set later.
		   On the other hand, it may be unusable if MDI data is corrupted. */

		pci_iounmap(pdev, iobase);
	}

	/* Reset the chip: stop Tx and Rx processes and clear counters.
	   This takes less than 10usec and will easily finish before the next
	   action. */
	iowrite32(PortReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort);
	udelay(10);

	if (eeprom[3] & 0x0100)
		product = "OEM i82557/i82558 10/100 Ethernet";
	else
		product = pci_name(pdev);

	printk(KERN_INFO "%s: %s, %s, IRQ %d.\n", dev->name, product,
		   print_mac(mac, dev->dev_addr), pdev->irq);

	sp = netdev_priv(dev);

	/* we must initialize this early, for mdio_{read,write} */
	sp->regs = ioaddr;

#if 1 || defined(kernel_bloat)
	/* OK, this is pure kernel bloat.  I don't like it when other drivers
	   waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
		/* The self-test results must be paragraph aligned. */
		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
		if ((eeprom[3] & 0x03) != 0x03)
			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
				   " work-around.\n");
		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
			   " connectors present:",
			   eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
		for (i = 0; i < 4; i++)
			if (eeprom[5] & (1<<i))
				printk(connectors[i]);
		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
			   phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
		if (eeprom[7] & 0x0700)
			printk(KERN_INFO "    Secondary interface chip %s.\n",
				   phys[(eeprom[7]>>8)&7]);
		if (((eeprom[6]>>8) & 0x3f) == DP83840
			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
			if (congenb)
				mdi_reg23 |= 0x0100;
			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
				   mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				   (option & 0x20 ? 100 : 10),
				   (option & 0x10 ? "full" : "half"));
			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
					   ((option & 0x20) ? 0x2000 : 0) |	/* 100mbps? */
					   ((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
		}

		/* Perform a system self-test. */
		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
		self_test_results[0] = 0;
		self_test_results[1] = -1;
		iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
		do {
			udelay(10);
		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);

		if (boguscnt < 0) {		/* Test optimized out. */
			printk(KERN_ERR "Self test failed, status %8.8x:\n"
				   KERN_ERR " Failure to initialize the i82557.\n"
				   KERN_ERR " Verify that the card is a bus-master"
				   " capable slot.\n",
				   self_test_results[1]);
		} else
			printk(KERN_INFO "  General self-test: %s.\n"
				   KERN_INFO "  Serial sub-system self-test: %s.\n"
				   KERN_INFO "  Internal registers self-test: %s.\n"
				   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
				   self_test_results[1] & 0x1000 ? "failed" : "passed",
				   self_test_results[1] & 0x0020 ? "failed" : "passed",
				   self_test_results[1] & 0x0008 ? "failed" : "passed",
				   self_test_results[1] & 0x0004 ? "failed" : "passed",
				   self_test_results[0]);
	}
#endif  /* kernel_bloat */

	iowrite32(PortReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort);
	udelay(10);

	/* Return the chip to its original power state. */
	pci_set_power_state(pdev, acpi_idle_state);

	pci_set_drvdata (pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;

	sp->pdev = pdev;
	sp->msg_enable = DEBUG;
	sp->acpi_pwr = acpi_idle_state;
	sp->tx_ring = tx_ring_space;
	sp->tx_ring_dma = tx_ring_dma;
	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
	init_timer(&sp->timer); /* used in ioctl() */
	spin_lock_init(&sp->lock);

	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
	if (card_idx >= 0) {
		if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
	sp->default_port = option >= 0 ? (option & 0x0f) : 0;

	sp->phy[0] = eeprom[6];
	sp->phy[1] = eeprom[7];

	sp->mii_if.phy_id = eeprom[6] & 0x1f;
	sp->mii_if.phy_id_mask = 0x1f;
	sp->mii_if.reg_num_mask = 0x1f;
	sp->mii_if.dev = dev;
	sp->mii_if.mdio_read = mdio_read;
	sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
	    || (pdev->device == 0x2449) || (pdev->device == 0x2459)
	    || (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}

	if (sp->rx_bug)
		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

	/* The Speedo-specific entries in the device structure. */
	dev->open = &speedo_open;
	dev->hard_start_xmit = &speedo_start_xmit;
	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
	dev->stop = &speedo_close;
	dev->get_stats = &speedo_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &speedo_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = &poll_speedo;
#endif

	if (register_netdevice(dev))
		goto err_free_unlock;
	rtnl_unlock();

	return 0;

 err_free_unlock:
	rtnl_unlock();
	free_netdev(dev);
	return -1;
}

static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
{
	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
	int wait = 0;
	do
		if (ioread8(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
			   ioread8(cmd_ioaddr), wait);

	iowrite8(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (ioread8(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (ioread8(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
		   "  Current status %8.8x.\n",
		   cmd, wait, ioread32(sp->regs + SCBStatus));
}

/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)
#define EE_WRITE_0	0x4802
#define EE_WRITE_1	0x4806
#define EE_OFFSET	SCBeeprom

/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24  SAW */
static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	void __iomem *ee_addr = ioaddr + SCBeeprom;

	iowrite16(EE_ENB, ee_addr); udelay(2);
	iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		iowrite16(dataval, ee_addr); udelay(2);
		iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	iowrite16(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	iowrite16(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
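
/* A usage sketch for do_eeprom_cmd(), mirroring the probe code in
   speedo_found1() (illustration only): reading word i of a small 64-word
   EEPROM clocks a 27-bit transaction with the read opcode at the top,

	u16 value = do_eeprom_cmd(iobase, (EE_READ_CMD << 22) | (i << 16), 27);

   and the trailing bit times double as dummy cycles during which the
   EEPROM shifts the addressed word back out, leaving it in the low
   16 bits of the return value. */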

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
	do {
		val = ioread32(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
	return val & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
			ioaddr + SCBCtrlMDI);
	do {
		val = ioread32(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
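
/* The SCBCtrlMDI word used above packs the opcode, PHY address and register
   number into a single 32-bit write; bit 28 (0x10000000) reads back as the
   completion flag.  Schematically, fetching BMSR of PHY 1 is just
   (illustration only -- always go through the helpers):

	int bmsr = mdio_read(dev, 1, MII_BMSR);

   which issues iowrite32(0x08000000 | (MII_BMSR<<16) | (1<<21), ...) and
   polls until bit 28 is set. */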

static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, PCI_D0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = NULL;
	sp->tx_full = 0;
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		*/
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;	/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}

/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev, sp) != 0) {
		iowrite32(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	iowrite32(0, ioaddr + SCBPointer);
	ioread32(ioaddr + SCBPointer);	/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, sp, RxAddrLoad);
	do_slow_command(dev, sp, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
	ioread32(ioaddr + SCBPointer);	/* Flush to PCI */

	iowrite8(CUStatsAddr, ioaddr + SCBCmd);
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev, sp);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
					dev->name);
	} else {
		iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
			 ioaddr + SCBPointer);
		ioread32(ioaddr + SCBPointer);	/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, sp, RxStart);
	do_slow_command(dev, sp, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		 ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}

/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm shows up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rfd;
	void __iomem *ioaddr;

	ioaddr = sp->regs;
	if (wait_for_cmd_done(dev, sp) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	iowrite8(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	rfd->rx_buf_addr = 0xffffffff;

	if (wait_for_cmd_done(dev, sp) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		ioaddr + SCBPointer);
	iowrite8(RxStart, ioaddr + SCBCmd);
}


/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  &&  jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}

static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		void __iomem *ioaddr = sp->regs;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean? --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
			    dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif

}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		if (skb)
			rx_align(skb);	/* Align IP on 16 byte boundary */
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;		/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;		/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->data;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;				/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
				sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}

static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}

static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}

static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int status = ioread16(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, ioread16(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);

	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			 ioaddr + SCBPointer);
		iowrite16(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		iowrite32(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		iowrite16(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}

static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev, sp);
		iowrite8(0, ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	iowrite8(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode().  If there is no more space than that
	   reserved for the multicast filter, mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}

static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = netdev_priv(dev);

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
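		/* The transmit threshold adjusted above lives in the upper bits
		 * of the TxFD count word (sp->tx_threshold is written to .count
		 * in speedo_start_xmit()); raising it by 0x00040000 after an
		 * underrun makes the chip wait for more of the frame in its
		 * FIFO before starting transmission, capped at 0x01e08000. */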
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	void __iomem *ioaddr;
	long boguscnt = max_interrupt_work;
	unsigned short status;
	unsigned int handled = 0;

	sp = netdev_priv(dev);
	ioaddr = sp->regs;

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return IRQ_NONE;
	}
#endif

	do {
		status = ioread16(ioaddr + SCBStatus);
		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		iowrite16(status & 0xfc00, ioaddr + SCBStatus);
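		/* The SCB status bits are write-one-to-clear: writing back the
		 * bits just read acknowledges exactly those interrupt sources.
		 * The 0xfc00 mask leaves the FCP and ER bits unacknowledged,
		 * since those sources are not handled yet. */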

		if (netif_msg_intr(sp))
			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;
		handled = 1;


		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
			/* Need to gather the postponed packet. */
			speedo_rx(dev);

		/* Always check if all rx buffers are allocated. --SAW */
		speedo_refill_rx_buffers(dev, 0);

		spin_lock(&sp->lock);
		/*
		 * The chip may have suspended reception for various reasons.
		 * Check for that, and re-prime it should this be the case.
		 */
		switch ((status >> 2) & 0xf) {
		case 0: /* Idle */
			break;
		case 1:	/* Suspended */
		case 2:	/* No resources (RxFDs) */
		case 9:	/* Suspended with no more RBDs */
		case 10: /* No resources due to no RBDs */
		case 12: /* Ready with no RBDs */
			speedo_rx_soft_reset(dev);
			break;
		case 3: case 5: case 6: case 7: case 8:
		case 11: case 13: case 14: case 15:
			/* these are all reserved values */
			break;
		}


		/* User interrupt, Command/Tx unit interrupt or CU not active. */
		if (status & 0xA400) {
			speedo_tx_buffer_gc(dev);
			if (sp->tx_full
				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
				/* The ring is no longer full. */
				sp->tx_full = 0;
				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
			}
		}

		spin_unlock(&sp->lock);

		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			/* Will change from 0xfc00 to 0xff00 when we start handling
			   FCP and ER interrupts --Dragan */
			iowrite16(0xfc00, ioaddr + SCBStatus);
			break;
		}
	} while (1);

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	clear_bit(0, (void*)&sp->in_interrupt);
	return IRQ_RETVAL(handled);
}

static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	if (skb)
		rx_align(skb);		/* Align IP on 16 byte boundary */
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
					   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	rxf->rx_buf_addr = 0xffffffff;
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}
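
/* Rx buffer layout used above: the i82557 RxFD header is placed at the
 * head of the skb's data area, immediately followed by the packet buffer,
 * so a single pci_map_single() covers both.  skb_reserve() then hides the
 * header so skb->data points at the packet itself:
 *
 *	skb->data -> [ struct RxFD | packet data (PKT_BUF_SZ) ]
 *	              ^-- DMA-mapped as one region
 */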

static inline void speedo_rx_link(struct net_device *dev, int entry,
								  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = netdev_priv(dev);
	rxf->status = cpu_to_le32(0xC0000001);	/* '1' for driver use only. */
	rxf->link = 0;			/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}
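
/* The 0xC0000000 mask above covers the EL (end-of-list) and S (suspend)
 * bits of the RxFD status word: a new frame descriptor is queued with both
 * set so the receive unit stops on it, and clearing them on the previous
 * tail lets reception flow into the new descriptor without restarting the
 * RU. */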

static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
						dev->name, force);
				sp->rx_ring_state |= RrOOMReported;
			}
			speedo_show_state(dev);
			if (!force)
				return -1;	/* Better luck next time! */
			/* Borrow an skb from one of the next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}

static void speedo_refill_rx_buffers(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
			speedo_refill_rx_buf(dev, force) != -1);
}

static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;
	int npkts = 0;

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
									sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring. --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet.  It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (netif_msg_rx_err(sp))
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
					   dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
				   pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if (! (status & RxOK)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
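			/* This is the rx_copybreak heuristic: packets shorter than
			 * rx_copybreak are copied into a freshly allocated small
			 * skb so the large ring buffer can be recycled in place;
			 * longer packets are passed up in the original ring skb
			 * and the slot is refilled with a new buffer. */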
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
											sizeof(struct RxFD) + pkt_len,
											PCI_DMA_FROMDEVICE);

#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_copy_from_linear_data(sp->rx_skbuff[entry],
							  skb_put(skb, pkt_len),
							  pkt_len);
#endif
				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
											   sizeof(struct RxFD) + pkt_len,
											   PCI_DMA_FROMDEVICE);
				npkts++;
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				npkts++;
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
								 PKT_BUF_SZ + sizeof(struct RxFD),
								 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	if (npkts)
		sp->last_rx_time = jiffies;

	return 0;
}

static int
speedo_close(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	iowrite16(SCBMaskAll, ioaddr + SCBCmd);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	iowrite32(PortPartialReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period.  Wait here!
	 */
	udelay(10);

	free_irq(dev->irq, dev);
	speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = NULL;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = NULL;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, PCI_D2);

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   Oh, and incoming frames are dropped while executing dump-stats!
 */
static struct net_device_stats *
speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Update only if the previous dump finished. */
	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {	/* done_marker is stored little-endian */
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
		sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
		sp->lstats->done_marker = 0x0000;
		if (netif_running(dev)) {
			unsigned long flags;
			/* Take a spinlock to make wait_for_cmd_done and sending the
			   command atomic.  --SAW */
			spin_lock_irqsave(&sp->lock, flags);
			wait_for_cmd_done(dev, sp);
			iowrite8(CUDumpStats, ioaddr + SCBCmd);
			spin_unlock_irqrestore(&sp->lock, flags);
		}
	}
	return &sp->stats;
}

static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct speedo_private *sp = netdev_priv(dev);
	strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
	strncpy(info->version, version, sizeof(info->version)-1);
	if (sp->pdev)
		strcpy(info->bus_info, pci_name(sp->pdev));
}

static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	spin_lock_irq(&sp->lock);
	mii_ethtool_gset(&sp->mii_if, ecmd);
	spin_unlock_irq(&sp->lock);
	return 0;
}

static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	int res;
	spin_lock_irq(&sp->lock);
	res = mii_ethtool_sset(&sp->mii_if, ecmd);
	spin_unlock_irq(&sp->lock);
	return res;
}

static int speedo_nway_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return mii_nway_restart(&sp->mii_if);
}

static u32 speedo_get_link(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return mii_link_ok(&sp->mii_if);
}

static u32 speedo_get_msglevel(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return sp->msg_enable;
}

static void speedo_set_msglevel(struct net_device *dev, u32 v)
{
	struct speedo_private *sp = netdev_priv(dev);
	sp->msg_enable = v;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = speedo_get_drvinfo,
	.get_settings = speedo_get_settings,
	.set_settings = speedo_set_settings,
	.nway_reset = speedo_nway_reset,
	.get_link = speedo_get_link,
	.get_msglevel = speedo_get_msglevel,
	.set_msglevel = speedo_set_msglevel,
};

static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int phy = sp->phy[0] & 0x1f;
	int saved_acpi;
	int t;

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = phy;

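		/* Fall through: after returning the PHY id, SIOCGMIIPHY also
		 * reads the requested register, like SIOCGMIIREG. */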
	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* FIXME: these operations need to be serialized with MDIO
		   access from the timeout handler.
		   They are currently serialized only with MDIO access from the
		   timer routine.  2000/05/09 SAW */
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
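
/* Encoding of new_rx_mode below, matching how the bits are consumed when
 * the configure frame is built: bit 1 selects promiscuous mode (written
 * into config byte 15) and bit 0 selects accept-all-multicast (config
 * byte 21).  So 3 = promiscuous, 1 = all-multicast, 0 = normal filtering,
 * and -1 marks "mode needs to be (re)programmed". */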
static void set_rx_mode(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	struct descriptor *last_cmd;
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (netif_msg_rx_status(sp))
		printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
				sp->rx_mode, new_rx_mode);

	if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
		/* The Tx ring is full -- don't add anything!  Hope the mode will be
		 * set again later. */
		sp->rx_mode = -1;
		return;
	}

	if (new_rx_mode != sp->rx_mode) {
		u8 *config_cmd_data;

		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = NULL;			/* Redundant. */
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
		sp->tx_ring[entry].link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame. */
		memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		/* 0x80 doesn't disable flow control; 0x84 does.
		   Disable flow control since we are not ACK-ing any FC interrupts
		   for now. --Dragan */
		config_cmd_data[19] = 0x84;
		config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume. */
		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		iowrite8(CUResume, ioaddr + SCBCmd);
		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);
	}

	if (new_rx_mode == 0 && dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;

		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = NULL;
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
		sp->tx_ring[entry].link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		iowrite8(CUResume, ioaddr + SCBCmd);

		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);
	} else if (new_rx_mode == 0) {
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;
		struct speedo_mc_block *mc_blk;
		struct descriptor *mc_setup_frm;
		int i;

		mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
						 GFP_ATOMIC);
		if (mc_blk == NULL) {
			printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
				   dev->name);
			sp->rx_mode = -1; /* We failed, try again. */
			return;
		}
		mc_blk->next = NULL;
		mc_blk->len = 2 + multicast_filter_limit*6;
		mc_blk->frame_dma =
			pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
					PCI_DMA_TODEVICE);
		mc_setup_frm = &mc_blk->frame;

		/* Fill the setup frame. */
		if (netif_msg_ifup(sp))
			printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
				   dev->name, mc_setup_frm);
		mc_setup_frm->cmd_status =
			cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
		/* Link set below. */
		setup_params = (u16 *)&mc_setup_frm->params;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		/* Disable interrupts while playing with the Tx Cmd list. */
		spin_lock_irqsave(&sp->lock, flags);

		if (sp->mc_setup_tail)
			sp->mc_setup_tail->next = mc_blk;
		else
			sp->mc_setup_head = mc_blk;
		sp->mc_setup_tail = mc_blk;
		mc_blk->tx = sp->cur_tx;
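		/* Recording sp->cur_tx here lets speedo_tx_buffer_gc() free
		 * this block once dirty_tx has advanced past it (see the
		 * mc_setup_head loop there). */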

		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = mc_setup_frm;

		/* Change the command to a NoOp, pointing to the CmdMulti command. */
		sp->tx_skbuff[entry] = NULL;
		sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
		sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);

		/* Set the link in the setup frame. */
		mc_setup_frm->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));

		pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
									   mc_blk->len, PCI_DMA_TODEVICE);

		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		iowrite8(CUResume, ioaddr + SCBCmd);

		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG "%s: CmdMCSetup frame with %d addresses in entry %d.\n",
				   dev->name, dev->mc_count, entry);
	}

	sp->rx_mode = new_rx_mode;
}

#ifdef CONFIG_PM
static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&sp->timer);

	netif_device_detach(dev);
	iowrite32(PortPartialReset, ioaddr + SCBPort);

	/* XXX call pci_set_power_state ()? */
	pci_disable_device(pdev);
	pci_set_power_state (pdev, PCI_D3hot);
	return 0;
}

static int eepro100_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	/* I'm absolutely uncertain whether this part of the code works.
	   The problems are:
	   - correct hardware reinitialization;
	   - correct driver behavior between different steps of the
	     reinitialization;
	   - serialization with other driver calls.
	   2000/03/08  SAW */
	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);
	netif_device_attach(dev);
	sp->rx_mode = -1;
	sp->flow_ctrl = sp->partner = 0;
	set_rx_mode(dev);
	sp->timer.expires = RUN_AT(2*HZ);
	add_timer(&sp->timer);
	return 0;
}
#endif /* CONFIG_PM */

static void __devexit eepro100_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);

	unregister_netdev(dev);

	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));

	pci_iounmap(pdev, sp->regs);
	pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
						+ sizeof(struct speedo_stats),
						sp->tx_ring, sp->tx_ring_dma);
	pci_disable_device(pdev);
	free_netdev(dev);
}

static struct pci_device_id eepro100_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);

static struct pci_driver eepro100_driver = {
	.name		= "eepro100",
	.id_table	= eepro100_pci_tbl,
	.probe		= eepro100_init_one,
	.remove		= __devexit_p(eepro100_remove_one),
#ifdef CONFIG_PM
	.suspend	= eepro100_suspend,
	.resume		= eepro100_resume,
#endif /* CONFIG_PM */
};

static int __init eepro100_init_module(void)
{
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&eepro100_driver);
}

static void __exit eepro100_cleanup_module(void)
{
	pci_unregister_driver(&eepro100_driver);
}

module_init(eepro100_init_module);
module_exit(eepro100_cleanup_module);

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */