/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
	Written 1996-1999 by Donald Becker.

	The driver also contains updates by different kernel developers
	(see incomplete list below).
	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
	Please use this email address and linux-kernel mailing list for bug reports.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
	It should work with all i82557/558/559 boards.

	Version history:
	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
		Serious fixes for multicast filter list setting, TX timeout routine;
		RX ring refilling logic; other stuff
	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
		Convert to new PCI driver interface
	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
	2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
		rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char * const version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";

34/* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb) skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE 64
72#define RX_RING_SIZE 64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
75#define TX_MULTICAST_SIZE 2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
88#define PKT_BUF_SZ 1536
89
#include <linux/module.h>
91
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
96#include <linux/slab.h>
97#include <linux/interrupt.h>
98#include <linux/timer.h>
99#include <linux/pci.h>
100#include <linux/spinlock.h>
101#include <linux/init.h>
102#include <linux/mii.h>
103#include <linux/delay.h>
104#include <linux/bitops.h>
105
106#include <asm/io.h>
107#include <asm/uaccess.h>
108#include <asm/irq.h>
109
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/rtnetlink.h>
113#include <linux/skbuff.h>
114#include <linux/ethtool.h>
115
116static int use_io;
117static int debug = -1;
118#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
119 NETIF_MSG_HW | \
120 NETIF_MSG_RX_ERR | \
121 NETIF_MSG_TX_ERR)
122#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
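/* Illustration: debug=4 yields a message-enable mask of (1<<4)-1 = 0x0f,
   i.e. the lowest four NETIF_MSG_* bits (DRV, PROBE, LINK, TIMER in the
   usual netif_msg ordering); a negative debug value falls back to
   DEBUG_DEFAULT above. */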
123
124
125MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127MODULE_LICENSE("GPL");
128module_param(use_io, int, 0);
129module_param(debug, int, 0);
130module_param_array(options, int, NULL, 0);
131module_param_array(full_duplex, int, NULL, 0);
132module_param(congenb, int, 0);
133module_param(txfifo, int, 0);
134module_param(rxfifo, int, 0);
135module_param(txdmacount, int, 0);
136module_param(rxdmacount, int, 0);
137module_param(rx_copybreak, int, 0);
138module_param(max_interrupt_work, int, 0);
139module_param(multicast_filter_limit, int, 0);
140MODULE_PARM_DESC(debug, "debug level (0-6)");
141MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
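/* Hypothetical invocation (example values only):
	modprobe eepro100 options=0x30,0x30 debug=3
   forces 100 Mbps full duplex on the first two boards (bit 4 = full duplex,
   bit 5 = 100 Mbps, as described for 'options' above) and enables a modest
   amount of logging. */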
151
152#define RUN_AT(x) (jiffies + (x))
153
154#define netdevice_start(dev)
155#define netdevice_stop(dev)
156#define netif_set_tx_timeout(dev, tf, tm) \
157 do { \
158 (dev)->tx_timeout = (tf); \
159 (dev)->watchdog_timeo = (tm); \
160 } while(0)
161
162
163
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line. While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet". This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.
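
As a concrete sketch (this mirrors what speedo_init_rx_ring() below actually
does and is illustrative rather than normative), each receive buffer is set
up so that the RxFD occupies the first bytes of the skbuff and the packet
data follows it:

	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	rxf = (struct RxFD *) skb->data;	    <- the descriptor sits at the head
	skb_reserve(skb, sizeof(struct RxFD));	    <- packet data starts after it
	rxf->rx_buf_addr = 0xffffffff;		    <- this field is unused by the i82557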

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers. The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space. This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section. Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure. This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor. So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method. The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit. The setup frames may have the NoOp command on the
Tx ring marked as complete while the setup command itself has not yet
completed, but this is not a problem. The tx_ring entry can still be safely
reused, as the tx_skbuff[] entry is always empty for config_cmd and mc_setup
frames.

Commands may have bits set, e.g. CmdSuspend, in the command word to either
suspend or stop the transmit/command unit. This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an intervening
delay. Thus the resume command is always within the interrupts-disabled
region. This is a timing dependence, but handling this condition in a
timing-independent way would considerably complicate the code.
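
In code form the queue-and-resume step looks roughly like this (a condensed
sketch of what speedo_start_xmit() below does with sp->lock held; error
handling and bookkeeping are omitted):

	sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	wait_for_cmd_done(dev, sp);		    <- command unit must be ready
	clear_suspend(sp->last_cmd);		    <- un-suspend the previous command
	iowrite8(CUResume, ioaddr + SCBCmd);	    <- resume immediately afterwards
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];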

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process. This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers. The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff. For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.
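
The receive-path decision therefore has roughly this shape (an illustrative
sketch only; the real speedo_rx() also handles DMA syncing, error frames and
ring bookkeeping):

	if (pkt_len < rx_copybreak
	    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		    <- align the IP header, as rx_align() does
		memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data, pkt_len);
	} else {
		skb = sp->rx_skbuff[entry];	    <- hand the full-sized skbuff up
		sp->rx_skbuff[entry] = NULL;	    <- the ring slot is refilled later
		skb_put(skb, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);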

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information. But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!

*/
277
278static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
282enum speedo_offsets {
283 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
284 SCBIntmask = 3,
285 SCBPointer = 4, /* General purpose pointer. */
286 SCBPort = 8, /* Misc. commands and operands. */
287 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288 SCBCtrlMDI = 16, /* MDI interface control. */
289 SCBEarlyRx = 20, /* Early receive byte count. */
290};
291/* Commands that can be put in a command list entry. */
292enum commands {
293 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
294 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
295 CmdDump = 0x60000, CmdDiagnose = 0x70000,
296 CmdSuspend = 0x40000000, /* Suspend after completion. */
297 CmdIntr = 0x20000000, /* Interrupt after completion. */
298 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
299};
300/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
301 status bits. Previous driver versions used separate 16 bit fields for
302 commands and statuses. --SAW
303 */
304#if defined(__alpha__)
305# define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
306#else
307# if defined(__LITTLE_ENDIAN)
308# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
309# elif defined(__BIG_ENDIAN)
310# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
311# else
312# error Unsupported byteorder
313# endif
314#endif
315
316enum SCBCmdBits {
317 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
318 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
319 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
320 /* The rest are Rx and Tx commands. */
321 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
322 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
323 CUDumpStats=0x0070, /* Dump then reset stats counters. */
324 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
325 RxResumeNoResources=0x0007,
326};
327
328enum SCBPort_cmds {
329 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
330};
331
332/* The Speedo3 Rx and Tx frame/buffer descriptors. */
333struct descriptor { /* A generic descriptor. */
334 volatile s32 cmd_status; /* All command and status fields. */
335 u32 link; /* struct descriptor * */
336 unsigned char params[0];
337};
338
339/* The Speedo3 Rx and Tx buffer descriptors. */
340struct RxFD { /* Receive frame descriptor. */
341 volatile s32 status;
342 u32 link; /* struct RxFD * */
343 u32 rx_buf_addr; /* void * */
344 u32 count;
345} RxFD_ALIGNMENT;
346
347/* Selected elements of the Tx/RxFD.status word. */
348enum RxFD_bits {
349 RxComplete=0x8000, RxOK=0x2000,
350 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
351 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
352 TxUnderrun=0x1000, StatusComplete=0x8000,
353};
354
355#define CONFIG_DATA_SIZE 22
356struct TxFD { /* Transmit frame descriptor set. */
357 s32 status;
358 u32 link; /* void * */
359 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
360 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
361 /* This constitutes two "TBD" entries -- we only use one. */
362#define TX_DESCR_BUF_OFFSET 16
363 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
364 s32 tx_buf_size0; /* Length of Tx frame. */
365 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
366 s32 tx_buf_size1; /* Length of Tx frame. */
367 /* the structure must have space for at least CONFIG_DATA_SIZE starting
368 * from tx_desc_addr field */
369};
370
371/* Multicast filter setting block. --SAW */
372struct speedo_mc_block {
373 struct speedo_mc_block *next;
374 unsigned int tx;
375 dma_addr_t frame_dma;
376 unsigned int len;
377 struct descriptor frame __attribute__ ((__aligned__(16)));
378};
379
380/* Elements of the dump_statistics block. This block must be lword aligned. */
381struct speedo_stats {
382 u32 tx_good_frames;
383 u32 tx_coll16_errs;
384 u32 tx_late_colls;
385 u32 tx_underruns;
386 u32 tx_lost_carrier;
387 u32 tx_deferred;
388 u32 tx_one_colls;
389 u32 tx_multi_colls;
390 u32 tx_total_colls;
391 u32 rx_good_frames;
392 u32 rx_crc_errs;
393 u32 rx_align_errs;
394 u32 rx_resource_errs;
395 u32 rx_overrun_errs;
396 u32 rx_colls_errs;
397 u32 rx_runt_errs;
398 u32 done_marker;
399};
400
401enum Rx_ring_state_bits {
402 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
403};
404
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required. 2000/03/06 SAW */
410struct speedo_private {
411 void __iomem *regs;
412 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
413 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
414 /* The addresses of a Tx/Rx-in-place packets/buffers. */
415 struct sk_buff *tx_skbuff[TX_RING_SIZE];
416 struct sk_buff *rx_skbuff[RX_RING_SIZE];
417 /* Mapped addresses of the rings. */
418 dma_addr_t tx_ring_dma;
419#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
420 dma_addr_t rx_ring_dma[RX_RING_SIZE];
421 struct descriptor *last_cmd; /* Last command sent. */
422 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
423 spinlock_t lock; /* Group with Tx control cache line. */
424 u32 tx_threshold; /* The value for txdesc.count. */
425 struct RxFD *last_rxf; /* Last filled RX buffer. */
426 dma_addr_t last_rxf_dma;
427 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
428 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
429 struct net_device_stats stats;
430 struct speedo_stats *lstats;
431 dma_addr_t lstats_dma;
432 int chip_id;
433 struct pci_dev *pdev;
434 struct timer_list timer; /* Media selection timer. */
435 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
436 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
437 long in_interrupt; /* Word-aligned dev->interrupt */
438 unsigned char acpi_pwr;
439 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
440 unsigned int tx_full:1; /* The Tx queue is full. */
441 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
442 unsigned int rx_bug:1; /* Work around receiver hang errata. */
443 unsigned char default_port:8; /* Last dev->if_port value. */
444 unsigned char rx_ring_state; /* RX ring status flags. */
445 unsigned short phy[2]; /* PHY media interfaces available. */
446 unsigned short partner; /* Link partner caps. */
447 struct mii_if_info mii_if; /* MII API hooks, info */
448 u32 msg_enable; /* debug message level */
449};
450
451/* The parameters for a CmdConfigure operation.
452 There are so many options that it would be difficult to document each bit.
453 We mostly use the default or recommended settings. */
454static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
455 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
456 0, 0x2E, 0, 0x60, 0,
457 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
458 0x3f, 0x05, };
459static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
460 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
461 0, 0x2E, 0, 0x60, 0x08, 0x88,
462 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
463 0x31, 0x05, };
464
465/* PHY media interface chips. */
static const char * const phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
468 "DP83840", "80c240", "80c24", "i82555",
469 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
470 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
471enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
472 S80C24, I82555, DP83840A=10, };
473static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
474#define EE_READ_CMD (6)
475
476static int eepro100_init_one(struct pci_dev *pdev,
477 const struct pci_device_id *ent);
478
479static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
480static int mdio_read(struct net_device *dev, int phy_id, int location);
481static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
482static int speedo_open(struct net_device *dev);
483static void speedo_resume(struct net_device *dev);
484static void speedo_timer(unsigned long data);
485static void speedo_init_rx_ring(struct net_device *dev);
486static void speedo_tx_timeout(struct net_device *dev);
487static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
488static void speedo_refill_rx_buffers(struct net_device *dev, int force);
489static int speedo_rx(struct net_device *dev);
490static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
static int speedo_close(struct net_device *dev);
493static struct net_device_stats *speedo_get_stats(struct net_device *dev);
494static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
495static void set_rx_mode(struct net_device *dev);
496static void speedo_show_state(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;



501#ifdef honor_default_port
502/* Optional driver feature to allow forcing the transceiver setting.
503 Not recommended. */
504static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
505 0x2000, 0x2100, 0x0400, 0x3100};
506#endif
507
508/* How to wait for the command unit to accept a command.
509 Typically this takes 0 ticks. */
510static inline unsigned char wait_for_cmd_done(struct net_device *dev,
511 struct speedo_private *sp)
512{
513 int wait = 1000;
514 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
515 unsigned char r;
516
517 do {
518 udelay(1);
519 r = ioread8(cmd_ioaddr);
520 } while(r && --wait >= 0);
521
522 if (wait < 0)
523 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
524 return r;
525}
526
527static int __devinit eepro100_init_one (struct pci_dev *pdev,
528 const struct pci_device_id *ent)
529{
530 void __iomem *ioaddr;
531 int irq, pci_bar;
532 int acpi_idle_state = 0, pm;
533 static int cards_found /* = 0 */;
534 unsigned long pci_base;
535
536#ifndef MODULE
537 /* when built-in, we only print version if device is found */
538 static int did_version;
539 if (did_version++ == 0)
540 printk(version);
541#endif
542
543 /* save power state before pci_enable_device overwrites it */
544 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
545 if (pm) {
546 u16 pwr_command;
547 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
548 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
549 }
550
551 if (pci_enable_device(pdev))
552 goto err_out_free_mmio_region;
553
554 pci_set_master(pdev);
555
556 if (!request_region(pci_resource_start(pdev, 1),
557 pci_resource_len(pdev, 1), "eepro100")) {
		dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
560 }
561 if (!request_mem_region(pci_resource_start(pdev, 0),
562 pci_resource_len(pdev, 0), "eepro100")) {
		dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
565 }
566
567 irq = pdev->irq;
568 pci_bar = use_io ? 1 : 0;
569 pci_base = pci_resource_start(pdev, pci_bar);
570 if (DEBUG & NETIF_MSG_PROBE)
571 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
572 pci_base, irq);
573
574 ioaddr = pci_iomap(pdev, pci_bar, 0);
575 if (!ioaddr) {
		dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
		goto err_out_free_mmio_region;
578 }
579
580 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
581 cards_found++;
582 else
583 goto err_out_iounmap;
584
585 return 0;
586
587err_out_iounmap: ;
588 pci_iounmap(pdev, ioaddr);
589err_out_free_mmio_region:
590 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
591err_out_free_pio_region:
592 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
593err_out_none:
594 return -ENODEV;
595}
596
597#ifdef CONFIG_NET_POLL_CONTROLLER
598/*
599 * Polling 'interrupt' - used by things like netconsole to send skbs
600 * without having to re-enable interrupts. It's not called while
601 * the interrupt routine is executing.
602 */
603
604static void poll_speedo (struct net_device *dev)
605{
606 /* disable_irq is not very nice, but with the funny lockless design
607 we have no other choice. */
608 disable_irq(dev->irq);
	speedo_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
611}
612#endif
613
614static int __devinit speedo_found1(struct pci_dev *pdev,
615 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
616{
617 struct net_device *dev;
618 struct speedo_private *sp;
619 const char *product;
620 int i, option;
621 u16 eeprom[0x100];
622 int size;
623 void *tx_ring_space;
624 dma_addr_t tx_ring_dma;
625
626 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
627 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
628 if (tx_ring_space == NULL)
629 return -1;
630
631 dev = alloc_etherdev(sizeof(struct speedo_private));
632 if (dev == NULL) {
633 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
634 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
635 return -1;
636 }
637
638 SET_MODULE_OWNER(dev);
639 SET_NETDEV_DEV(dev, &pdev->dev);
640
641 if (dev->mem_start > 0)
642 option = dev->mem_start;
643 else if (card_idx >= 0 && options[card_idx] >= 0)
644 option = options[card_idx];
645 else
646 option = 0;
647
648 rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto err_free_unlock;
651
	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	 */
657 {
658 void __iomem *iobase;
659 int read_cmd, ee_size;
660 u16 sum;
661 int j;
662
663 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
664 requirements. */
665 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
666 if (!iobase)
667 goto err_free_unlock;
668 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
669 == 0xffe0000) {
670 ee_size = 0x100;
671 read_cmd = EE_READ_CMD << 24;
672 } else {
673 ee_size = 0x40;
674 read_cmd = EE_READ_CMD << 22;
675 }
676
677 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
678 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
679 eeprom[i] = value;
680 sum += value;
681 if (i < 3) {
682 dev->dev_addr[j++] = value;
683 dev->dev_addr[j++] = value >> 8;
684 }
685 }
686 if (sum != 0xBABA)
687 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
688 "check settings before activating this device!\n",
689 dev->name, sum);
690 /* Don't unregister_netdev(dev); as the EEPro may actually be
691 usable, especially if the MAC address is set later.
692 On the other hand, it may be unusable if MDI data is corrupted. */
693
694 pci_iounmap(pdev, iobase);
695 }
696
697 /* Reset the chip: stop Tx and Rx processes and clear counters.
698 This takes less than 10usec and will easily finish before the next
699 action. */
700 iowrite32(PortReset, ioaddr + SCBPort);
701 ioread32(ioaddr + SCBPort);
702 udelay(10);
703
704 if (eeprom[3] & 0x0100)
705 product = "OEM i82557/i82558 10/100 Ethernet";
706 else
707 product = pci_name(pdev);
708
709 printk(KERN_INFO "%s: %s, ", dev->name, product);
710
711 for (i = 0; i < 5; i++)
712 printk("%2.2X:", dev->dev_addr[i]);
713 printk("%2.2X, ", dev->dev_addr[i]);
714 printk("IRQ %d.\n", pdev->irq);
715
716 sp = netdev_priv(dev);
717
718 /* we must initialize this early, for mdio_{read,write} */
719 sp->regs = ioaddr;
720
721#if 1 || defined(kernel_bloat)
722 /* OK, this is pure kernel bloat. I don't like it when other drivers
723 waste non-pageable kernel space to emit similar messages, but I need
724 them for bug reports. */
725 {
726 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
727 /* The self-test results must be paragraph aligned. */
728 volatile s32 *self_test_results;
		int boguscnt = 16000;			/* Timeout for self-test. */
730 if ((eeprom[3] & 0x03) != 0x03)
731 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
732 " work-around.\n");
733 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
734 " connectors present:",
735 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
736 for (i = 0; i < 4; i++)
737 if (eeprom[5] & (1<<i))
738 printk(connectors[i]);
739 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
740 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
741 if (eeprom[7] & 0x0700)
742 printk(KERN_INFO " Secondary interface chip %s.\n",
743 phys[(eeprom[7]>>8)&7]);
744 if (((eeprom[6]>>8) & 0x3f) == DP83840
745 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
746 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
747 if (congenb)
748 mdi_reg23 |= 0x0100;
749 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
750 mdi_reg23);
751 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
752 }
753 if ((option >= 0) && (option & 0x70)) {
754 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
755 (option & 0x20 ? 100 : 10),
756 (option & 0x10 ? "full" : "half"));
757 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
758 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
759 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
760 }
761
762 /* Perform a system self-test. */
763 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
764 self_test_results[0] = 0;
765 self_test_results[1] = -1;
766 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
767 do {
768 udelay(10);
769 } while (self_test_results[1] == -1 && --boguscnt >= 0);
770
771 if (boguscnt < 0) { /* Test optimized out. */
772 printk(KERN_ERR "Self test failed, status %8.8x:\n"
773 KERN_ERR " Failure to initialize the i82557.\n"
774 KERN_ERR " Verify that the card is a bus-master"
775 " capable slot.\n",
776 self_test_results[1]);
777 } else
778 printk(KERN_INFO " General self-test: %s.\n"
779 KERN_INFO " Serial sub-system self-test: %s.\n"
780 KERN_INFO " Internal registers self-test: %s.\n"
781 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
782 self_test_results[1] & 0x1000 ? "failed" : "passed",
783 self_test_results[1] & 0x0020 ? "failed" : "passed",
784 self_test_results[1] & 0x0008 ? "failed" : "passed",
785 self_test_results[1] & 0x0004 ? "failed" : "passed",
786 self_test_results[0]);
787 }
788#endif /* kernel_bloat */
789
790 iowrite32(PortReset, ioaddr + SCBPort);
791 ioread32(ioaddr + SCBPort);
792 udelay(10);
793
794 /* Return the chip to its original power state. */
795 pci_set_power_state(pdev, acpi_idle_state);
796
797 pci_set_drvdata (pdev, dev);
798 SET_NETDEV_DEV(dev, &pdev->dev);
799
800 dev->irq = pdev->irq;
801
802 sp->pdev = pdev;
803 sp->msg_enable = DEBUG;
804 sp->acpi_pwr = acpi_idle_state;
805 sp->tx_ring = tx_ring_space;
806 sp->tx_ring_dma = tx_ring_dma;
807 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
808 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
809 init_timer(&sp->timer); /* used in ioctl() */
810 spin_lock_init(&sp->lock);
811
812 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
813 if (card_idx >= 0) {
814 if (full_duplex[card_idx] >= 0)
815 sp->mii_if.full_duplex = full_duplex[card_idx];
816 }
817 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
818
819 sp->phy[0] = eeprom[6];
820 sp->phy[1] = eeprom[7];
821
822 sp->mii_if.phy_id = eeprom[6] & 0x1f;
823 sp->mii_if.phy_id_mask = 0x1f;
824 sp->mii_if.reg_num_mask = 0x1f;
825 sp->mii_if.dev = dev;
826 sp->mii_if.mdio_read = mdio_read;
827 sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
	    || (pdev->device == 0x2449) || (pdev->device == 0x2459)
	    || (pdev->device == 0x245D)) {
833 sp->chip_id = 1;
834 }
835
836 if (sp->rx_bug)
837 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
838
839 /* The Speedo-specific entries in the device structure. */
840 dev->open = &speedo_open;
841 dev->hard_start_xmit = &speedo_start_xmit;
842 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
843 dev->stop = &speedo_close;
844 dev->get_stats = &speedo_get_stats;
845 dev->set_multicast_list = &set_rx_mode;
846 dev->do_ioctl = &speedo_ioctl;
847 SET_ETHTOOL_OPS(dev, &ethtool_ops);
848#ifdef CONFIG_NET_POLL_CONTROLLER
849 dev->poll_controller = &poll_speedo;
850#endif
851
852 if (register_netdevice(dev))
853 goto err_free_unlock;
854 rtnl_unlock();
855
856 return 0;
857
858 err_free_unlock:
859 rtnl_unlock();
860 free_netdev(dev);
861 return -1;
862}
863
864static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
865{
866 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
867 int wait = 0;
868 do
869 if (ioread8(cmd_ioaddr) == 0) break;
870 while(++wait <= 200);
871 if (wait > 100)
872 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
873 ioread8(cmd_ioaddr), wait);
874
875 iowrite8(cmd, cmd_ioaddr);
876
877 for (wait = 0; wait <= 100; wait++)
878 if (ioread8(cmd_ioaddr) == 0) return;
879 for (; wait <= 20000; wait++)
880 if (ioread8(cmd_ioaddr) == 0) return;
881 else udelay(1);
882 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
883 " Current status %8.8x.\n",
884 cmd, wait, ioread32(sp->regs + SCBStatus));
885}
886
887/* Serial EEPROM section.
888 A "bit" grungy, but we work our way through bit-by-bit :->. */
889/* EEPROM_Ctrl bits. */
890#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
891#define EE_CS 0x02 /* EEPROM chip select. */
892#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
893#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
894#define EE_ENB (0x4800 | EE_CS)
895#define EE_WRITE_0 0x4802
896#define EE_WRITE_1 0x4806
897#define EE_OFFSET SCBeeprom
898
/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies a 1us access
   interval for serial EEPROM. However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24 SAW */
906static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
907{
908 unsigned retval = 0;
909 void __iomem *ee_addr = ioaddr + SCBeeprom;
910
911 iowrite16(EE_ENB, ee_addr); udelay(2);
912 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
913
914 /* Shift the command bits out. */
915 do {
916 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
917 iowrite16(dataval, ee_addr); udelay(2);
918 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
919 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
920 } while (--cmd_len >= 0);
921 iowrite16(EE_ENB, ee_addr); udelay(2);
922
923 /* Terminate the EEPROM access. */
924 iowrite16(EE_ENB & ~EE_CS, ee_addr);
925 return retval;
926}
927
928static int mdio_read(struct net_device *dev, int phy_id, int location)
929{
930 struct speedo_private *sp = netdev_priv(dev);
931 void __iomem *ioaddr = sp->regs;
932 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
933 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
934 do {
935 val = ioread32(ioaddr + SCBCtrlMDI);
936 if (--boguscnt < 0) {
937 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
938 break;
939 }
940 } while (! (val & 0x10000000));
941 return val & 0xffff;
942}
943
944static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
945{
946 struct speedo_private *sp = netdev_priv(dev);
947 void __iomem *ioaddr = sp->regs;
948 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
949 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
950 ioaddr + SCBCtrlMDI);
951 do {
952 val = ioread32(ioaddr + SCBCtrlMDI);
953 if (--boguscnt < 0) {
954 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
955 break;
956 }
957 } while (! (val & 0x10000000));
958}
959
960static int
961speedo_open(struct net_device *dev)
962{
963 struct speedo_private *sp = netdev_priv(dev);
964 void __iomem *ioaddr = sp->regs;
965 int retval;
966
967 if (netif_msg_ifup(sp))
968 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
969
970 pci_set_power_state(sp->pdev, PCI_D0);
971
972 /* Set up the Tx queue early.. */
973 sp->cur_tx = 0;
974 sp->dirty_tx = 0;
975 sp->last_cmd = NULL;
976 sp->tx_full = 0;
977 sp->in_interrupt = 0;
978
979 /* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
	if (retval) {
982 return retval;
983 }
984
985 dev->if_port = sp->default_port;
986
987#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
988 /* Retrigger negotiation to reset previous errors. */
989 if ((sp->phy[0] & 0x8000) == 0) {
990 int phy_addr = sp->phy[0] & 0x1f ;
991 /* Use 0x3300 for restarting NWay, other values to force xcvr:
992 0x0000 10-HD
993 0x0100 10-FD
994 0x2000 100-HD
995 0x2100 100-FD
996 */
997#ifdef honor_default_port
998 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
999#else
1000 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1001#endif
1002 }
1003#endif
1004
1005 speedo_init_rx_ring(dev);
1006
1007 /* Fire up the hardware. */
1008 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1009 speedo_resume(dev);
1010
1011 netdevice_start(dev);
1012 netif_start_queue(dev);
1013
1014 /* Setup the chip and configure the multicast list. */
1015 sp->mc_setup_head = NULL;
1016 sp->mc_setup_tail = NULL;
1017 sp->flow_ctrl = sp->partner = 0;
1018 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1019 set_rx_mode(dev);
1020 if ((sp->phy[0] & 0x8000) == 0)
1021 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1022
1023 mii_check_link(&sp->mii_if);
1024
1025 if (netif_msg_ifup(sp)) {
1026 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1027 dev->name, ioread16(ioaddr + SCBStatus));
1028 }
1029
1030 /* Set the timer. The timer serves a dual purpose:
1031 1) to monitor the media interface (e.g. link beat) and perhaps switch
1032 to an alternate media type
1033 2) to monitor Rx activity, and restart the Rx process if the receiver
1034 hangs. */
1035 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1036 sp->timer.data = (unsigned long)dev;
1037 sp->timer.function = &speedo_timer; /* timer handler */
1038 add_timer(&sp->timer);
1039
1040 /* No need to wait for the command unit to accept here. */
1041 if ((sp->phy[0] & 0x8000) == 0)
1042 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1043
1044 return 0;
1045}
1046
1047/* Start the chip hardware after a full reset. */
1048static void speedo_resume(struct net_device *dev)
1049{
1050 struct speedo_private *sp = netdev_priv(dev);
1051 void __iomem *ioaddr = sp->regs;
1052
1053 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1054 sp->tx_threshold = 0x01208000;
1055
1056 /* Set the segment registers to '0'. */
1057 if (wait_for_cmd_done(dev, sp) != 0) {
1058 iowrite32(PortPartialReset, ioaddr + SCBPort);
1059 udelay(10);
1060 }
1061
1062 iowrite32(0, ioaddr + SCBPointer);
1063 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1064 udelay(10); /* Bogus, but it avoids the bug. */
1065
1066 /* Note: these next two operations can take a while. */
1067 do_slow_command(dev, sp, RxAddrLoad);
1068 do_slow_command(dev, sp, CUCmdBase);
1069
1070 /* Load the statistics block and rx ring addresses. */
1071 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1072 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1073
1074 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1075 sp->lstats->done_marker = 0;
1076 wait_for_cmd_done(dev, sp);
1077
1078 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1079 if (netif_msg_rx_err(sp))
1080 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1081 dev->name);
1082 } else {
1083 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1084 ioaddr + SCBPointer);
1085 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1086 }
1087
1088 /* Note: RxStart should complete instantly. */
1089 do_slow_command(dev, sp, RxStart);
1090 do_slow_command(dev, sp, CUDumpStats);
1091
1092 /* Fill the first command with our physical address. */
1093 {
1094 struct descriptor *ias_cmd;
1095
1096 ias_cmd =
1097 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1098 /* Avoid a bug(?!) here by marking the command already completed. */
1099 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1100 ias_cmd->link =
1101 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1102 memcpy(ias_cmd->params, dev->dev_addr, 6);
1103 if (sp->last_cmd)
1104 clear_suspend(sp->last_cmd);
1105 sp->last_cmd = ias_cmd;
1106 }
1107
1108 /* Start the chip's Tx process and unmask interrupts. */
1109 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1110 ioaddr + SCBPointer);
1111 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1112 remain masked --Dragan */
1113 iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1114}
1115
1116/*
1117 * Sometimes the receiver stops making progress. This routine knows how to
1118 * get it going again, without losing packets or being otherwise nasty like
1119 * a chip reset would be. Previously the driver had a whole sequence
1120 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1121 * do another, etc. But those things don't really matter. Separate logic
1122 * in the ISR provides for allocating buffers--the other half of operation
1123 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm shows up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1126 */
1127static void
1128speedo_rx_soft_reset(struct net_device *dev)
1129{
1130 struct speedo_private *sp = netdev_priv(dev);
1131 struct RxFD *rfd;
1132 void __iomem *ioaddr;
1133
1134 ioaddr = sp->regs;
1135 if (wait_for_cmd_done(dev, sp) != 0) {
1136 printk("%s: previous command stalled\n", dev->name);
1137 return;
1138 }
1139 /*
1140 * Put the hardware into a known state.
1141 */
1142 iowrite8(RxAbort, ioaddr + SCBCmd);
1143
1144 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1145
1146 rfd->rx_buf_addr = 0xffffffff;
1147
1148 if (wait_for_cmd_done(dev, sp) != 0) {
1149 printk("%s: RxAbort command stalled\n", dev->name);
1150 return;
1151 }
1152 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1153 ioaddr + SCBPointer);
1154 iowrite8(RxStart, ioaddr + SCBCmd);
1155}
1156
1157
1158/* Media monitoring and control. */
1159static void speedo_timer(unsigned long data)
1160{
1161 struct net_device *dev = (struct net_device *)data;
1162 struct speedo_private *sp = netdev_priv(dev);
1163 void __iomem *ioaddr = sp->regs;
1164 int phy_num = sp->phy[0] & 0x1f;
1165
1166 /* We have MII and lost link beat. */
1167 if ((sp->phy[0] & 0x8000) == 0) {
1168 int partner = mdio_read(dev, phy_num, MII_LPA);
1169 if (partner != sp->partner) {
1170 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1171 if (netif_msg_link(sp)) {
1172 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1173 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1174 dev->name, sp->partner, partner, sp->mii_if.advertising);
1175 }
1176 sp->partner = partner;
1177 if (flow_ctrl != sp->flow_ctrl) {
1178 sp->flow_ctrl = flow_ctrl;
1179 sp->rx_mode = -1; /* Trigger a reload. */
1180 }
1181 }
1182 }
1183 mii_check_link(&sp->mii_if);
1184 if (netif_msg_timer(sp)) {
1185 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1186 dev->name, ioread16(ioaddr + SCBStatus));
1187 }
1188 if (sp->rx_mode < 0 ||
1189 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1190 /* We haven't received a packet in a Long Time. We might have been
1191 bitten by the receiver hang bug. This can be cleared by sending
1192 a set multicast list command. */
1193 if (netif_msg_timer(sp))
1194 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1195 " from a timer routine,"
1196 " m=%d, j=%ld, l=%ld.\n",
1197 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1198 set_rx_mode(dev);
1199 }
1200 /* We must continue to monitor the media. */
1201 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1202 add_timer(&sp->timer);
1203}
1204
1205static void speedo_show_state(struct net_device *dev)
1206{
1207 struct speedo_private *sp = netdev_priv(dev);
1208 int i;
1209
1210 if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		       dev->name, sp->cur_tx, sp->dirty_tx);
1213 for (i = 0; i < TX_RING_SIZE; i++)
1214 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1215 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1216 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1217 i, sp->tx_ring[i].status);
1218
1219 printk(KERN_DEBUG "%s: Printing Rx ring"
1220 " (next to receive into %u, dirty index %u).\n",
1221 dev->name, sp->cur_rx, sp->dirty_rx);
1222 for (i = 0; i < RX_RING_SIZE; i++)
1223 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1224 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1225 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1226 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1227 i, (sp->rx_ringp[i] != NULL) ?
1228 (unsigned)sp->rx_ringp[i]->status : 0);
1229 }
1230
1231#if 0
1232 {
1233 void __iomem *ioaddr = sp->regs;
1234 int phy_num = sp->phy[0] & 0x1f;
1235 for (i = 0; i < 16; i++) {
1236 /* FIXME: what does it mean? --SAW */
1237 if (i == 6) i = 21;
1238 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1239 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1240 }
1241 }
1242#endif
1243
1244}
1245
1246/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1247static void
1248speedo_init_rx_ring(struct net_device *dev)
1249{
1250 struct speedo_private *sp = netdev_priv(dev);
1251 struct RxFD *rxf, *last_rxf = NULL;
1252 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1253 int i;
1254
1255 sp->cur_rx = 0;
1256
1257 for (i = 0; i < RX_RING_SIZE; i++) {
1258 struct sk_buff *skb;
1259 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		if (skb)
			rx_align(skb);	/* Align IP on 16 byte boundary */
		sp->rx_skbuff[i] = skb;
1263 if (skb == NULL)
1264 break; /* OK. Just initially short of Rx bufs. */
1265 skb->dev = dev; /* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->data;
		sp->rx_ringp[i] = rxf;
1268 sp->rx_ring_dma[i] =
1269 pci_map_single(sp->pdev, rxf,
1270 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1271 skb_reserve(skb, sizeof(struct RxFD));
1272 if (last_rxf) {
1273 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1274 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1275 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1276 }
1277 last_rxf = rxf;
1278 last_rxf_dma = sp->rx_ring_dma[i];
1279 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1280 rxf->link = 0; /* None yet. */
1281 /* This field unused by i82557. */
1282 rxf->rx_buf_addr = 0xffffffff;
1283 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1284 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1285 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1286 }
1287 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1288 /* Mark the last entry as end-of-list. */
1289 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1290 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1291 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1292 sp->last_rxf = last_rxf;
1293 sp->last_rxf_dma = last_rxf_dma;
1294}
1295
1296static void speedo_purge_tx(struct net_device *dev)
1297{
1298 struct speedo_private *sp = netdev_priv(dev);
1299 int entry;
1300
1301 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1302 entry = sp->dirty_tx % TX_RING_SIZE;
1303 if (sp->tx_skbuff[entry]) {
1304 sp->stats.tx_errors++;
1305 pci_unmap_single(sp->pdev,
1306 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1307 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1308 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1309 sp->tx_skbuff[entry] = NULL;
1310 }
1311 sp->dirty_tx++;
1312 }
1313 while (sp->mc_setup_head != NULL) {
1314 struct speedo_mc_block *t;
1315 if (netif_msg_tx_err(sp))
1316 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1317 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1318 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1319 t = sp->mc_setup_head->next;
1320 kfree(sp->mc_setup_head);
1321 sp->mc_setup_head = t;
1322 }
1323 sp->mc_setup_tail = NULL;
1324 sp->tx_full = 0;
1325 netif_wake_queue(dev);
1326}
1327
1328static void reset_mii(struct net_device *dev)
1329{
1330 struct speedo_private *sp = netdev_priv(dev);
1331
1332 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1333 if ((sp->phy[0] & 0x8000) == 0) {
1334 int phy_addr = sp->phy[0] & 0x1f;
1335 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1336 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1337 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1338 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1339 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1340 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1341#ifdef honor_default_port
1342 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1343#else
1344 mdio_read(dev, phy_addr, MII_BMCR);
1345 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1346 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1347#endif
1348 }
1349}
1350
1351static void speedo_tx_timeout(struct net_device *dev)
1352{
1353 struct speedo_private *sp = netdev_priv(dev);
1354 void __iomem *ioaddr = sp->regs;
1355 int status = ioread16(ioaddr + SCBStatus);
1356 unsigned long flags;
1357
1358 if (netif_msg_tx_err(sp)) {
1359 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1360 " %4.4x at %d/%d command %8.8x.\n",
1361 dev->name, status, ioread16(ioaddr + SCBCmd),
1362 sp->dirty_tx, sp->cur_tx,
1363 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1364
1365 }
1366 speedo_show_state(dev);
1367#if 0
1368 if ((status & 0x00C0) != 0x0080
1369 && (status & 0x003C) == 0x0010) {
1370 /* Only the command unit has stopped. */
1371 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1372 dev->name);
1373 iowrite32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1374 ioaddr + SCBPointer);
1375 iowrite16(CUStart, ioaddr + SCBCmd);
1376 reset_mii(dev);
1377 } else {
1378#else
1379 {
1380#endif
1381 del_timer_sync(&sp->timer);
1382 /* Reset the Tx and Rx units. */
1383 iowrite32(PortReset, ioaddr + SCBPort);
1384 /* We may get spurious interrupts here. But I don't think that they
1385 may do much harm. 1999/12/09 SAW */
1386 udelay(10);
1387 /* Disable interrupts. */
1388 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1389 synchronize_irq(dev->irq);
1390 speedo_tx_buffer_gc(dev);
1391 /* Free as much as possible.
1392 It helps to recover from a hang because of out-of-memory.
1393 It also simplifies speedo_resume() in case TX ring is full or
1394 close-to-be full. */
1395 speedo_purge_tx(dev);
1396 speedo_refill_rx_buffers(dev, 1);
1397 spin_lock_irqsave(&sp->lock, flags);
1398 speedo_resume(dev);
1399 sp->rx_mode = -1;
1400 dev->trans_start = jiffies;
1401 spin_unlock_irqrestore(&sp->lock, flags);
1402 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver. Do it before starting the timer to serialize
		   mdio_xxx operations. Yes, it's paranoia :-)  2000/05/09 SAW */
1405 reset_mii(dev);
1406 sp->timer.expires = RUN_AT(2*HZ);
1407 add_timer(&sp->timer);
1408 }
1409 return;
1410}
1411
1412static int
1413speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414{
1415 struct speedo_private *sp = netdev_priv(dev);
1416 void __iomem *ioaddr = sp->regs;
1417 int entry;
1418
1419 /* Prevent interrupts from changing the Tx ring from underneath us. */
1420 unsigned long flags;
1421
1422 spin_lock_irqsave(&sp->lock, flags);
1423
	/* Check if there is enough space. */
1425 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1426 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1427 netif_stop_queue(dev);
1428 sp->tx_full = 1;
1429 spin_unlock_irqrestore(&sp->lock, flags);
1430 return 1;
1431 }
1432
1433 /* Calculate the Tx descriptor entry. */
1434 entry = sp->cur_tx++ % TX_RING_SIZE;
1435
1436 sp->tx_skbuff[entry] = skb;
1437 sp->tx_ring[entry].status =
1438 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1439 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1440 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1441 sp->tx_ring[entry].link =
1442 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1443 sp->tx_ring[entry].tx_desc_addr =
1444 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1445 /* The data region is always in one buffer descriptor. */
1446 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1447 sp->tx_ring[entry].tx_buf_addr0 =
1448 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1449 skb->len, PCI_DMA_TODEVICE));
1450 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1451
1452 /* workaround for hardware bug on 10 mbit half duplex */
1453
1454 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1455 wait_for_cmd_done(dev, sp);
1456 iowrite8(0 , ioaddr + SCBCmd);
1457 udelay(1);
1458 }
1459
1460 /* Trigger the command unit resume. */
1461 wait_for_cmd_done(dev, sp);
1462 clear_suspend(sp->last_cmd);
1463 /* We want the time window between clearing suspend flag on the previous
1464 command and resuming CU to be as small as possible.
1465 Interrupts in between are very undesired. --SAW */
1466 iowrite8(CUResume, ioaddr + SCBCmd);
1467 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1468
	/* Leave room for set_rx_mode(). If there is no more space than that
	   reserved for the multicast filter, mark the ring as full. */
1471 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1472 netif_stop_queue(dev);
1473 sp->tx_full = 1;
1474 }
1475
1476 spin_unlock_irqrestore(&sp->lock, flags);
1477
1478 dev->trans_start = jiffies;
1479
1480 return 0;
1481}
1482
1483static void speedo_tx_buffer_gc(struct net_device *dev)
1484{
1485 unsigned int dirty_tx;
1486 struct speedo_private *sp = netdev_priv(dev);
1487
1488 dirty_tx = sp->dirty_tx;
1489 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1490 int entry = dirty_tx % TX_RING_SIZE;
1491 int status = le32_to_cpu(sp->tx_ring[entry].status);
1492
1493 if (netif_msg_tx_done(sp))
1494 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1495 entry, status);
1496 if ((status & StatusComplete) == 0)
1497 break; /* It still hasn't been processed. */
1498 if (status & TxUnderrun)
1499 if (sp->tx_threshold < 0x01e08000) {
1500 if (netif_msg_tx_err(sp))
1501 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1502 dev->name);
1503 sp->tx_threshold += 0x00040000;
1504 }
1505 /* Free the original skb. */
1506 if (sp->tx_skbuff[entry]) {
1507 sp->stats.tx_packets++; /* Count only user packets. */
1508 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1509 pci_unmap_single(sp->pdev,
1510 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1511 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1512 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1513 sp->tx_skbuff[entry] = NULL;
1514 }
1515 dirty_tx++;
1516 }
1517
1518 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1519 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1520 " full=%d.\n",
1521 dirty_tx, sp->cur_tx, sp->tx_full);
1522 dirty_tx += TX_RING_SIZE;
1523 }
1524
1525 while (sp->mc_setup_head != NULL
1526 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1527 struct speedo_mc_block *t;
1528 if (netif_msg_tx_err(sp))
1529 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1530 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1531 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1532 t = sp->mc_setup_head->next;
1533 kfree(sp->mc_setup_head);
1534 sp->mc_setup_head = t;
1535 }
1536 if (sp->mc_setup_head == NULL)
1537 sp->mc_setup_tail = NULL;
1538
1539 sp->dirty_tx = dirty_tx;
1540}
1541
1542/* The interrupt handler does all of the Rx thread work and cleans up
1543 after the Tx thread. */
1544static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
1545{
1546 struct net_device *dev = (struct net_device *)dev_instance;
1547 struct speedo_private *sp;
1548 void __iomem *ioaddr;
1549 long boguscnt = max_interrupt_work;
1550 unsigned short status;
1551 unsigned int handled = 0;
1552
1553 sp = netdev_priv(dev);
1554 ioaddr = sp->regs;
1555
1556#ifndef final_version
1557 /* A lock to prevent simultaneous entry on SMP machines. */
1558 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1559 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1560 dev->name);
1561 sp->in_interrupt = 0; /* Avoid halting machine. */
1562 return IRQ_NONE;
1563 }
1564#endif
1565
1566 do {
1567 status = ioread16(ioaddr + SCBStatus);
1568 /* Acknowledge all of the current interrupt sources ASAP. */
1569 /* Will change from 0xfc00 to 0xff00 when we start handling
1570 FCP and ER interrupts --Dragan */
1571 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1572
1573 if (netif_msg_intr(sp))
1574 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1575 dev->name, status);
1576
1577 if ((status & 0xfc00) == 0)
1578 break;
1579 handled = 1;
1580
1581
1582 if ((status & 0x5000) || /* Packet received, or Rx error. */
1583 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1584 /* Need to gather the postponed packet. */
1585 speedo_rx(dev);
1586
1587 /* Always check if all rx buffers are allocated. --SAW */
1588 speedo_refill_rx_buffers(dev, 0);
1589
1590		spin_lock(&sp->lock);
1591 /*
1592 * The chip may have suspended reception for various reasons.
1593 * Check for that, and re-prime it should this be the case.
1594 */
1595 switch ((status >> 2) & 0xf) {
1596 case 0: /* Idle */
1597 break;
1598 case 1: /* Suspended */
1599 case 2: /* No resources (RxFDs) */
1600 case 9: /* Suspended with no more RBDs */
1601 case 10: /* No resources due to no RBDs */
1602 case 12: /* Ready with no RBDs */
1603 speedo_rx_soft_reset(dev);
1604 break;
1605 case 3: case 5: case 6: case 7: case 8:
1606 case 11: case 13: case 14: case 15:
1607 /* these are all reserved values */
1608 break;
1609 }
1610
1611
1612		/* User interrupt, Command/Tx unit interrupt or CU not active. */
1613 if (status & 0xA400) {
1614 speedo_tx_buffer_gc(dev);
1615 if (sp->tx_full
1616 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1617 /* The ring is no longer full. */
1618 sp->tx_full = 0;
1619 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1620 }
1621 }
1622
1623		spin_unlock(&sp->lock);
1624
1625 if (--boguscnt < 0) {
1626 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1627 dev->name, status);
1628 /* Clear all interrupt sources. */
1629 /* Will change from 0xfc00 to 0xff00 when we start handling
1630 FCP and ER interrupts --Dragan */
1631 iowrite16(0xfc00, ioaddr + SCBStatus);
1632 break;
1633 }
1634 } while (1);
1635
1636 if (netif_msg_intr(sp))
1637 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1638 dev->name, ioread16(ioaddr + SCBStatus));
1639
1640 clear_bit(0, (void*)&sp->in_interrupt);
1641 return IRQ_RETVAL(handled);
1642}
1643
1644static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1645{
1646 struct speedo_private *sp = netdev_priv(dev);
1647 struct RxFD *rxf;
1648 struct sk_buff *skb;
1649 /* Get a fresh skbuff to replace the consumed one. */
1650 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1651	if (skb)
1652 rx_align(skb); /* Align IP on 16 byte boundary */
1653	sp->rx_skbuff[entry] = skb;
1654 if (skb == NULL) {
1655 sp->rx_ringp[entry] = NULL;
1656 return NULL;
1657 }
1658	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1659	sp->rx_ring_dma[entry] =
1660 pci_map_single(sp->pdev, rxf,
1661 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1662 skb->dev = dev;
1663 skb_reserve(skb, sizeof(struct RxFD));
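	/* Simplified receive mode: the packet data follows the RxFD directly, so
	   the RBD pointer is presumably left as the null value 0xffffffff. */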
1664 rxf->rx_buf_addr = 0xffffffff;
1665 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1666 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1667 return rxf;
1668}
1669
1670static inline void speedo_rx_link(struct net_device *dev, int entry,
1671 struct RxFD *rxf, dma_addr_t rxf_dma)
1672{
1673 struct speedo_private *sp = netdev_priv(dev);
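	/* Append the new RxFD to the chain: mark it end-of-list/suspend (EL|S),
	   link the previous tail to it, then clear EL|S on the old tail so the
	   receiver can advance into the new descriptor (assumed bit layout). */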
1674 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1675 rxf->link = 0; /* None yet. */
1676 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1677 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1678 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1679 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1680 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1681 sp->last_rxf = rxf;
1682 sp->last_rxf_dma = rxf_dma;
1683}
1684
1685static int speedo_refill_rx_buf(struct net_device *dev, int force)
1686{
1687 struct speedo_private *sp = netdev_priv(dev);
1688 int entry;
1689 struct RxFD *rxf;
1690
1691 entry = sp->dirty_rx % RX_RING_SIZE;
1692 if (sp->rx_skbuff[entry] == NULL) {
1693 rxf = speedo_rx_alloc(dev, entry);
1694 if (rxf == NULL) {
1695 unsigned int forw;
1696 int forw_entry;
1697 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1698 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1699 dev->name, force);
1700 sp->rx_ring_state |= RrOOMReported;
1701 }
1702 speedo_show_state(dev);
1703 if (!force)
1704 return -1; /* Better luck next time! */
1705 /* Borrow an skb from one of next entries. */
1706 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1707 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1708 break;
1709 if (forw == sp->cur_rx)
1710 return -1;
1711 forw_entry = forw % RX_RING_SIZE;
1712 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1713 sp->rx_skbuff[forw_entry] = NULL;
1714 rxf = sp->rx_ringp[forw_entry];
1715 sp->rx_ringp[forw_entry] = NULL;
1716 sp->rx_ringp[entry] = rxf;
1717 }
1718 } else {
1719 rxf = sp->rx_ringp[entry];
1720 }
1721 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1722 sp->dirty_rx++;
1723 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1724 return 0;
1725}
1726
1727static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1728{
1729 struct speedo_private *sp = netdev_priv(dev);
1730
1731 /* Refill the RX ring. */
1732 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1733 speedo_refill_rx_buf(dev, force) != -1);
1734}
1735
1736static int
1737speedo_rx(struct net_device *dev)
1738{
1739 struct speedo_private *sp = netdev_priv(dev);
1740 int entry = sp->cur_rx % RX_RING_SIZE;
1741 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
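	/* Presumably limits one call to at most the number of ring slots that
	   still hold allocated buffers. */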
1742 int alloc_ok = 1;
1743 int npkts = 0;
1744
1745 if (netif_msg_intr(sp))
1746 printk(KERN_DEBUG " In speedo_rx().\n");
1747 /* If we own the next entry, it's a new packet. Send it up. */
1748 while (sp->rx_ringp[entry] != NULL) {
1749 int status;
1750 int pkt_len;
1751
1752 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1753 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1754 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1755 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1756
1757 if (!(status & RxComplete))
1758 break;
1759
1760 if (--rx_work_limit < 0)
1761 break;
1762
1763 /* Check for a rare out-of-memory case: the current buffer is
1764 the last buffer allocated in the RX ring. --SAW */
1765 if (sp->last_rxf == sp->rx_ringp[entry]) {
1766 /* Postpone the packet. It'll be reaped at an interrupt when this
1767 packet is no longer the last packet in the ring. */
1768 if (netif_msg_rx_err(sp))
1769 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1770 dev->name);
1771 sp->rx_ring_state |= RrPostponed;
1772 break;
1773 }
1774
1775 if (netif_msg_rx_status(sp))
1776 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1777 pkt_len);
1778 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1779 if (status & RxErrTooBig)
1780 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1781 "status %8.8x!\n", dev->name, status);
1782 else if (! (status & RxOK)) {
1783 /* There was a fatal error. This *should* be impossible. */
1784 sp->stats.rx_errors++;
1785 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1786 "status %8.8x.\n",
1787 dev->name, status);
1788 }
1789 } else {
1790 struct sk_buff *skb;
1791
1792 /* Check if the packet is long enough to just accept without
1793 copying to a properly sized skbuff. */
1794 if (pkt_len < rx_copybreak
1795 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1796			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1797 /* 'skb_put()' points to the start of sk_buff data area. */
1798 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1799 sizeof(struct RxFD) + pkt_len,
1800 PCI_DMA_FROMDEVICE);
1801
1802#if 1 || USE_IP_CSUM
1803 /* Packet is in one chunk -- we can copy + cksum. */
1804			skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1805			skb_put(skb, pkt_len);
1806#else
1807			skb_copy_from_linear_data(sp->rx_skbuff[entry],
1808 skb_put(skb, pkt_len),
1809 pkt_len);
1810#endif
1811 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1812 sizeof(struct RxFD) + pkt_len,
1813 PCI_DMA_FROMDEVICE);
1814 npkts++;
1815 } else {
1816 /* Pass up the already-filled skbuff. */
1817 skb = sp->rx_skbuff[entry];
1818 if (skb == NULL) {
1819 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1820 dev->name);
1821 break;
1822 }
1823 sp->rx_skbuff[entry] = NULL;
1824 skb_put(skb, pkt_len);
1825 npkts++;
1826 sp->rx_ringp[entry] = NULL;
1827 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1828 PKT_BUF_SZ + sizeof(struct RxFD),
1829 PCI_DMA_FROMDEVICE);
1830 }
1831 skb->protocol = eth_type_trans(skb, dev);
1832 netif_rx(skb);
1833 dev->last_rx = jiffies;
1834 sp->stats.rx_packets++;
1835 sp->stats.rx_bytes += pkt_len;
1836 }
1837 entry = (++sp->cur_rx) % RX_RING_SIZE;
1838 sp->rx_ring_state &= ~RrPostponed;
1839 /* Refill the recently taken buffers.
1840 Do it one-by-one to handle traffic bursts better. */
1841 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1842 alloc_ok = 0;
1843 }
1844
1845 /* Try hard to refill the recently taken buffers. */
1846 speedo_refill_rx_buffers(dev, 1);
1847
1848 if (npkts)
1849 sp->last_rx_time = jiffies;
1850
1851 return 0;
1852}
1853
1854static int
1855speedo_close(struct net_device *dev)
1856{
1857 struct speedo_private *sp = netdev_priv(dev);
1858 void __iomem *ioaddr = sp->regs;
1859 int i;
1860
1861 netdevice_stop(dev);
1862 netif_stop_queue(dev);
1863
1864 if (netif_msg_ifdown(sp))
1865 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1866 dev->name, ioread16(ioaddr + SCBStatus));
1867
1868 /* Shut off the media monitoring timer. */
1869 del_timer_sync(&sp->timer);
1870
1871 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1872
1873 /* Shutting down the chip nicely fails to disable flow control. So.. */
1874 iowrite32(PortPartialReset, ioaddr + SCBPort);
1875 ioread32(ioaddr + SCBPort); /* flush posted write */
1876 /*
1877 * The chip requires a 10 microsecond quiet period. Wait here!
1878 */
1879 udelay(10);
1880
1881 free_irq(dev->irq, dev);
1882 speedo_show_state(dev);
1883
1884 /* Free all the skbuffs in the Rx and Tx queues. */
1885 for (i = 0; i < RX_RING_SIZE; i++) {
1886 struct sk_buff *skb = sp->rx_skbuff[i];
1887 sp->rx_skbuff[i] = NULL;
1888 /* Clear the Rx descriptors. */
1889 if (skb) {
1890 pci_unmap_single(sp->pdev,
1891 sp->rx_ring_dma[i],
1892 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1893 dev_kfree_skb(skb);
1894 }
1895 }
1896
1897 for (i = 0; i < TX_RING_SIZE; i++) {
1898 struct sk_buff *skb = sp->tx_skbuff[i];
1899 sp->tx_skbuff[i] = NULL;
1900 /* Clear the Tx descriptors. */
1901 if (skb) {
1902 pci_unmap_single(sp->pdev,
1903 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1904 skb->len, PCI_DMA_TODEVICE);
1905 dev_kfree_skb(skb);
1906 }
1907 }
1908
1909 /* Free multicast setting blocks. */
1910 for (i = 0; sp->mc_setup_head != NULL; i++) {
1911 struct speedo_mc_block *t;
1912 t = sp->mc_setup_head->next;
1913 kfree(sp->mc_setup_head);
1914 sp->mc_setup_head = t;
1915 }
1916 sp->mc_setup_tail = NULL;
1917 if (netif_msg_ifdown(sp))
1918 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1919
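	/* Drop the chip into a low-power PCI state while the interface is down. */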
1920 pci_set_power_state(sp->pdev, PCI_D2);
1921
1922 return 0;
1923}
1924
1925/* The Speedo-3 has an especially awkward and unusable method of getting
1926 statistics out of the chip. It takes an unpredictable length of time
1927 for the dump-stats command to complete. To avoid a busy-wait loop we
1928 update the stats with the previous dump results, and then trigger a
1929 new dump.
1930
1931 Oh, and incoming frames are dropped while executing dump-stats!
1932 */
1933static struct net_device_stats *
1934speedo_get_stats(struct net_device *dev)
1935{
1936 struct speedo_private *sp = netdev_priv(dev);
1937 void __iomem *ioaddr = sp->regs;
1938
1939 /* Update only if the previous dump finished. */
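	/* 0xA007 is apparently the completion code the chip writes into the dump
	   area when a dump-and-reset-statistics command has finished. */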
1940 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1941 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1942 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1943 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1944 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1945 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1946 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1947 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1948 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1949 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1950 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1951 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1952 sp->lstats->done_marker = 0x0000;
1953 if (netif_running(dev)) {
1954 unsigned long flags;
1955 /* Take a spinlock to make wait_for_cmd_done and sending the
1956 command atomic. --SAW */
1957 spin_lock_irqsave(&sp->lock, flags);
1958 wait_for_cmd_done(dev, sp);
1959 iowrite8(CUDumpStats, ioaddr + SCBCmd);
1960 spin_unlock_irqrestore(&sp->lock, flags);
1961 }
1962 }
1963 return &sp->stats;
1964}
1965
1966static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1967{
1968 struct speedo_private *sp = netdev_priv(dev);
1969 strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1970 strncpy(info->version, version, sizeof(info->version)-1);
1971 if (sp->pdev)
1972 strcpy(info->bus_info, pci_name(sp->pdev));
1973}
1974
1975static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1976{
1977 struct speedo_private *sp = netdev_priv(dev);
1978 spin_lock_irq(&sp->lock);
1979 mii_ethtool_gset(&sp->mii_if, ecmd);
1980 spin_unlock_irq(&sp->lock);
1981 return 0;
1982}
1983
1984static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1985{
1986 struct speedo_private *sp = netdev_priv(dev);
1987 int res;
1988 spin_lock_irq(&sp->lock);
1989 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1990 spin_unlock_irq(&sp->lock);
1991 return res;
1992}
1993
1994static int speedo_nway_reset(struct net_device *dev)
1995{
1996 struct speedo_private *sp = netdev_priv(dev);
1997 return mii_nway_restart(&sp->mii_if);
1998}
1999
2000static u32 speedo_get_link(struct net_device *dev)
2001{
2002 struct speedo_private *sp = netdev_priv(dev);
2003 return mii_link_ok(&sp->mii_if);
2004}
2005
2006static u32 speedo_get_msglevel(struct net_device *dev)
2007{
2008 struct speedo_private *sp = netdev_priv(dev);
2009 return sp->msg_enable;
2010}
2011
2012static void speedo_set_msglevel(struct net_device *dev, u32 v)
2013{
2014 struct speedo_private *sp = netdev_priv(dev);
2015 sp->msg_enable = v;
2016}
2017
2018static const struct ethtool_ops ethtool_ops = {
2019	.get_drvinfo = speedo_get_drvinfo,
2020 .get_settings = speedo_get_settings,
2021 .set_settings = speedo_set_settings,
2022 .nway_reset = speedo_nway_reset,
2023 .get_link = speedo_get_link,
2024 .get_msglevel = speedo_get_msglevel,
2025 .set_msglevel = speedo_set_msglevel,
2026};
2027
2028static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2029{
2030 struct speedo_private *sp = netdev_priv(dev);
2031 struct mii_ioctl_data *data = if_mii(rq);
2032 int phy = sp->phy[0] & 0x1f;
2033 int saved_acpi;
2034 int t;
2035
2036 switch(cmd) {
2037 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2038 data->phy_id = phy;
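		/* No break here: fall through to SIOCGMIIREG so the register is read too. */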
2039
2040 case SIOCGMIIREG: /* Read MII PHY register. */
2041 /* FIXME: these operations need to be serialized with MDIO
2042 access from the timeout handler.
2043 They are currently serialized only with MDIO access from the
2044 timer routine. 2000/05/09 SAW */
2045 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2046 t = del_timer_sync(&sp->timer);
2047 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2048 if (t)
2049 add_timer(&sp->timer); /* may be set to the past --SAW */
2050 pci_set_power_state(sp->pdev, saved_acpi);
2051 return 0;
2052
2053 case SIOCSMIIREG: /* Write MII PHY register. */
2054 if (!capable(CAP_NET_ADMIN))
2055 return -EPERM;
2056 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2057 t = del_timer_sync(&sp->timer);
2058 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2059 if (t)
2060 add_timer(&sp->timer); /* may be set to the past --SAW */
2061 pci_set_power_state(sp->pdev, saved_acpi);
2062 return 0;
2063 default:
2064 return -EOPNOTSUPP;
2065 }
2066}
2067
2068/* Set or clear the multicast filter for this adaptor.
2069 This is very ugly with Intel chips -- we usually have to execute an
2070 entire configuration command, plus process a multicast command.
2071 This is complicated. We must put a large configuration command and
2072 an arbitrarily-sized multicast command in the transmit list.
2073 To minimize the disruption -- the previous command might have already
2074 loaded the link -- we convert the current command block, normally a Tx
2075 command, into a no-op and link it to the new command.
2076*/
2077static void set_rx_mode(struct net_device *dev)
2078{
2079 struct speedo_private *sp = netdev_priv(dev);
2080 void __iomem *ioaddr = sp->regs;
2081 struct descriptor *last_cmd;
2082 char new_rx_mode;
2083 unsigned long flags;
2084 int entry, i;
2085
2086 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2087 new_rx_mode = 3;
2088 } else if ((dev->flags & IFF_ALLMULTI) ||
2089 dev->mc_count > multicast_filter_limit) {
2090 new_rx_mode = 1;
2091 } else
2092 new_rx_mode = 0;
2093
2094 if (netif_msg_rx_status(sp))
2095 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2096 sp->rx_mode, new_rx_mode);
2097
2098 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2099 /* The Tx ring is full -- don't add anything! Hope the mode will be
2100 * set again later. */
2101 sp->rx_mode = -1;
2102 return;
2103 }
2104
2105 if (new_rx_mode != sp->rx_mode) {
2106 u8 *config_cmd_data;
2107
2108 spin_lock_irqsave(&sp->lock, flags);
2109 entry = sp->cur_tx++ % TX_RING_SIZE;
2110 last_cmd = sp->last_cmd;
2111 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2112
2113 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2114 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2115 sp->tx_ring[entry].link =
2116 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2117 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2118 /* Construct a full CmdConfig frame. */
2119 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
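		/* Patch the template (assumed i82558 config layout): byte 1 = Tx/Rx FIFO
		   thresholds, bytes 4-5 = DMA burst counts, byte 15 bit 0 = promiscuous,
		   byte 19 = flow control/duplex, byte 21 = multicast-all bit. */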
2120 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2121 config_cmd_data[4] = rxdmacount;
2122 config_cmd_data[5] = txdmacount + 0x80;
2123 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2124		/* 0x80 doesn't disable FC; 0x84 does.
2125		   Disable flow control since we are not ACK-ing any FC interrupts
2126		   for now. --Dragan */
2127 config_cmd_data[19] = 0x84;
2128 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2129 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2130 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2131 config_cmd_data[15] |= 0x80;
2132 config_cmd_data[8] = 0;
2133 }
2134 /* Trigger the command unit resume. */
2135 wait_for_cmd_done(dev, sp);
2136 clear_suspend(last_cmd);
2137 iowrite8(CUResume, ioaddr + SCBCmd);
2138 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2139 netif_stop_queue(dev);
2140 sp->tx_full = 1;
2141 }
2142 spin_unlock_irqrestore(&sp->lock, flags);
2143 }
2144
2145 if (new_rx_mode == 0 && dev->mc_count < 4) {
2146 /* The simple case of 0-3 multicast list entries occurs often, and
2147 fits within one tx_ring[] entry. */
2148 struct dev_mc_list *mclist;
2149 u16 *setup_params, *eaddrs;
2150
2151 spin_lock_irqsave(&sp->lock, flags);
2152 entry = sp->cur_tx++ % TX_RING_SIZE;
2153 last_cmd = sp->last_cmd;
2154 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2155
2156 sp->tx_skbuff[entry] = NULL;
2157 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2158 sp->tx_ring[entry].link =
2159 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2160 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2161 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2162 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2163 /* Fill in the multicast addresses. */
2164 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2165 i++, mclist = mclist->next) {
2166 eaddrs = (u16 *)mclist->dmi_addr;
2167 *setup_params++ = *eaddrs++;
2168 *setup_params++ = *eaddrs++;
2169 *setup_params++ = *eaddrs++;
2170 }
2171
2172 wait_for_cmd_done(dev, sp);
2173 clear_suspend(last_cmd);
2174 /* Immediately trigger the command unit resume. */
2175 iowrite8(CUResume, ioaddr + SCBCmd);
2176
2177 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2178 netif_stop_queue(dev);
2179 sp->tx_full = 1;
2180 }
2181 spin_unlock_irqrestore(&sp->lock, flags);
2182 } else if (new_rx_mode == 0) {
2183 struct dev_mc_list *mclist;
2184 u16 *setup_params, *eaddrs;
2185 struct speedo_mc_block *mc_blk;
2186 struct descriptor *mc_setup_frm;
2187 int i;
2188
2189 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2190 GFP_ATOMIC);
2191 if (mc_blk == NULL) {
2192 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2193 dev->name);
2194 sp->rx_mode = -1; /* We failed, try again. */
2195 return;
2196 }
2197 mc_blk->next = NULL;
2198 mc_blk->len = 2 + multicast_filter_limit*6;
2199 mc_blk->frame_dma =
2200 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2201 PCI_DMA_TODEVICE);
2202 mc_setup_frm = &mc_blk->frame;
2203
2204 /* Fill the setup frame. */
2205 if (netif_msg_ifup(sp))
2206 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2207 dev->name, mc_setup_frm);
2208 mc_setup_frm->cmd_status =
2209 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2210 /* Link set below. */
2211 setup_params = (u16 *)&mc_setup_frm->params;
2212 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2213 /* Fill in the multicast addresses. */
2214 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2215 i++, mclist = mclist->next) {
2216 eaddrs = (u16 *)mclist->dmi_addr;
2217 *setup_params++ = *eaddrs++;
2218 *setup_params++ = *eaddrs++;
2219 *setup_params++ = *eaddrs++;
2220 }
2221
2222 /* Disable interrupts while playing with the Tx Cmd list. */
2223 spin_lock_irqsave(&sp->lock, flags);
2224
2225 if (sp->mc_setup_tail)
2226 sp->mc_setup_tail->next = mc_blk;
2227 else
2228 sp->mc_setup_head = mc_blk;
2229 sp->mc_setup_tail = mc_blk;
2230 mc_blk->tx = sp->cur_tx;
2231
2232 entry = sp->cur_tx++ % TX_RING_SIZE;
2233 last_cmd = sp->last_cmd;
2234 sp->last_cmd = mc_setup_frm;
2235
2236 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2237 sp->tx_skbuff[entry] = NULL;
2238 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2239 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2240
2241 /* Set the link in the setup frame. */
2242 mc_setup_frm->link =
2243 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2244
2245 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2246 mc_blk->len, PCI_DMA_TODEVICE);
2247
2248 wait_for_cmd_done(dev, sp);
2249 clear_suspend(last_cmd);
2250 /* Immediately trigger the command unit resume. */
2251 iowrite8(CUResume, ioaddr + SCBCmd);
2252
2253 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2254 netif_stop_queue(dev);
2255 sp->tx_full = 1;
2256 }
2257 spin_unlock_irqrestore(&sp->lock, flags);
2258
2259 if (netif_msg_rx_status(sp))
2260 printk(" CmdMCSetup frame length %d in entry %d.\n",
2261 dev->mc_count, entry);
2262 }
2263
2264 sp->rx_mode = new_rx_mode;
2265}
2266
2267#ifdef CONFIG_PM
2268static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2269{
2270 struct net_device *dev = pci_get_drvdata (pdev);
2271 struct speedo_private *sp = netdev_priv(dev);
2272 void __iomem *ioaddr = sp->regs;
2273
2274 pci_save_state(pdev);
2275
2276 if (!netif_running(dev))
2277 return 0;
2278
2279	del_timer_sync(&sp->timer);
2280
2281 netif_device_detach(dev);
2282 iowrite32(PortPartialReset, ioaddr + SCBPort);
2283
2284	/* XXX call pci_set_power_state ()? */
2285 pci_disable_device(pdev);
2286 pci_set_power_state (pdev, PCI_D3hot);
2287 return 0;
2288}
2289
2290static int eepro100_resume(struct pci_dev *pdev)
2291{
2292 struct net_device *dev = pci_get_drvdata (pdev);
2293 struct speedo_private *sp = netdev_priv(dev);
2294 void __iomem *ioaddr = sp->regs;
2295	int rc;
2296
2297 pci_set_power_state(pdev, PCI_D0);
2298 pci_restore_state(pdev);
2299
2300 rc = pci_enable_device(pdev);
2301 if (rc)
2302 return rc;
2303
2304	pci_set_master(pdev);
2305
2306 if (!netif_running(dev))
2307 return 0;
2308
2309	/* I'm absolutely uncertain whether this part of the code works.
2310 The problems are:
2311 - correct hardware reinitialization;
2312 - correct driver behavior between different steps of the
2313 reinitialization;
2314 - serialization with other driver calls.
2315 2000/03/08 SAW */
2316 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2317 speedo_resume(dev);
2318 netif_device_attach(dev);
2319 sp->rx_mode = -1;
2320 sp->flow_ctrl = sp->partner = 0;
2321 set_rx_mode(dev);
2322 sp->timer.expires = RUN_AT(2*HZ);
2323 add_timer(&sp->timer);
2324 return 0;
2325}
2326#endif /* CONFIG_PM */
2327
2328static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2329{
2330 struct net_device *dev = pci_get_drvdata (pdev);
2331 struct speedo_private *sp = netdev_priv(dev);
2332
2333	unregister_netdev(dev);
2334
2335 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2336 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2337
2338 pci_iounmap(pdev, sp->regs);
2339 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2340 + sizeof(struct speedo_stats),
2341 sp->tx_ring, sp->tx_ring_dma);
2342 pci_disable_device(pdev);
2343 free_netdev(dev);
2344}
2345
2346static struct pci_device_id eepro100_pci_tbl[] = {
2347 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2348 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2362 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2363 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2364 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2365 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2366 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2367 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2368 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2369 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2370 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2371 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2372 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2373 { 0,}
2374};
2375MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2376
2377static struct pci_driver eepro100_driver = {
2378 .name = "eepro100",
2379 .id_table = eepro100_pci_tbl,
2380 .probe = eepro100_init_one,
2381 .remove = __devexit_p(eepro100_remove_one),
2382#ifdef CONFIG_PM
2383 .suspend = eepro100_suspend,
2384 .resume = eepro100_resume,
2385#endif /* CONFIG_PM */
2386};
2387
2388static int __init eepro100_init_module(void)
2389{
2390#ifdef MODULE
2391 printk(version);
2392#endif
2393	return pci_register_driver(&eepro100_driver);
2394}
2395
2396static void __exit eepro100_cleanup_module(void)
2397{
2398 pci_unregister_driver(&eepro100_driver);
2399}
2400
2401module_init(eepro100_init_module);
2402module_exit(eepro100_cleanup_module);
2403
2404/*
2405 * Local variables:
2406 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2407 * c-indent-level: 4
2408 * c-basic-offset: 4
2409 * tab-width: 4
2410 * End:
2411 */