1/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2/*
3 Written 1996-1999 by Donald Becker.
4
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
9
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
12
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
15
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23	Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25	PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
28*/
29
30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34/* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb) skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
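/* (Without the extra 2 bytes, the 16-byte RxFD reserved at the head of each
   Rx skbuff plus the 14-byte Ethernet header would typically leave the IP
   header only 2-byte aligned; reserving 2 more bytes moves it to a 4-byte
   boundary.) */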
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE 64
72#define RX_RING_SIZE 64
73/* How many slots the multicast filter setup may take.
74   Do not decrease without changing the set_rx_mode() implementation. */
75#define TX_MULTICAST_SIZE 2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
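/* Worked example with the defaults above: TX_QUEUE_LIMIT = 64 - 2*2 = 60, so
   at most 60 packets are queued, and TX_QUEUE_UNFULL = 56 means the queue is
   only woken again after at least 4 slots have been freed. */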
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88#define PKT_BUF_SZ 1536
89
90#include <linux/module.h>
91
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
96#include <linux/slab.h>
97#include <linux/interrupt.h>
98#include <linux/timer.h>
99#include <linux/pci.h>
100#include <linux/spinlock.h>
101#include <linux/init.h>
102#include <linux/mii.h>
103#include <linux/delay.h>
104#include <linux/bitops.h>
105
106#include <asm/io.h>
107#include <asm/uaccess.h>
108#include <asm/irq.h>
109
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/rtnetlink.h>
113#include <linux/skbuff.h>
114#include <linux/ethtool.h>
115
116static int use_io;
117static int debug = -1;
118#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
119 NETIF_MSG_HW | \
120 NETIF_MSG_RX_ERR | \
121 NETIF_MSG_TX_ERR)
122#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
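/* For example, loading with debug=3 yields (1<<3)-1 = 0x7, i.e.
   NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK; debug < 0 (the default)
   falls back to DEBUG_DEFAULT above. */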
123
124
125MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127MODULE_LICENSE("GPL");
128module_param(use_io, int, 0);
129module_param(debug, int, 0);
130module_param_array(options, int, NULL, 0);
131module_param_array(full_duplex, int, NULL, 0);
132module_param(congenb, int, 0);
133module_param(txfifo, int, 0);
134module_param(rxfifo, int, 0);
135module_param(txdmacount, int, 0);
136module_param(rxdmacount, int, 0);
137module_param(rx_copybreak, int, 0);
138module_param(max_interrupt_work, int, 0);
139module_param(multicast_filter_limit, int, 0);
140MODULE_PARM_DESC(debug, "debug level (0-6)");
141MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
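/* Typical usage (an illustrative example, not the only valid combination):
 *   modprobe eepro100 debug=5 options=0x30,0x30 full_duplex=1,1
 * forces two boards to 100 Mbps full duplex with verbose logging. */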
151
152#define RUN_AT(x) (jiffies + (x))
153
154#define netdevice_start(dev)
155#define netdevice_stop(dev)
156#define netif_set_tx_timeout(dev, tf, tm) \
157 do { \
158 (dev)->tx_timeout = (tf); \
159 (dev)->watchdog_timeo = (tm); \
160 } while(0)
161
162
163
164/*
165 Theory of Operation
166
167I. Board Compatibility
168
169This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
170single-chip fast Ethernet controller for PCI, as used on the Intel
171EtherExpress Pro 100 adapter.
172
173II. Board-specific settings
174
175PCI bus devices are configured by the system at boot time, so no jumpers
176need to be set on the board. The system BIOS should be set to assign the
177PCI INTA signal to an otherwise unused system IRQ line. While it's
178possible to share PCI interrupt lines, it negatively impacts performance and
179only recent kernels support it.
180
181III. Driver operation
182
183IIIA. General
184The Speedo3 is very similar to other Intel network chips, that is to say
185"apparently designed on a different planet". This chips retains the complex
186Rx and Tx descriptors and multiple buffers pointers as previous chips, but
187also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
188Tx mode, but in a simplified lower-overhead manner: it associates only a
189single buffer descriptor with each frame descriptor.
190
191Despite the extra space overhead in each receive skbuff, the driver must use
192the simplified Rx buffer mode to assure that only a single data buffer is
193associated with each RxFD. The driver implements this by reserving space
194for the Rx descriptor at the head of each Rx skbuff.
195
196The Speedo-3 has receive and command unit base addresses that are added to
197almost all descriptor pointers. The driver sets these to zero, so that all
198pointer fields are absolute addresses.
199
200The System Control Block (SCB) of some previous Intel chips exists on the
201chip in both PCI I/O and memory space. This driver uses the I/O space
202registers, but might switch to memory mapped mode to better support non-x86
203processors.
204
205IIIB. Transmit structure
206
207The driver must use the complex Tx command+descriptor mode in order to
208have an indirect pointer to the skbuff data section. Each Tx command block
209(TxCB) is associated with two immediately appended Tx Buffer Descriptors
210(TxBD). A fixed ring of these TxCB+TxBD pairs is kept as part of the
211speedo_private data structure for each adapter instance.
212
213The newer i82558 explicitly supports this structure, and can read the two
214TxBDs in the same PCI burst as the TxCB.
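In outline, queueing one frame in this flexible mode (a simplified sketch of
what speedo_start_xmit() below actually does; field names follow struct TxFD):

	entry = sp->cur_tx++ % TX_RING_SIZE;
	txfd = &sp->tx_ring[entry];
	txfd->status       = cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	txfd->link         = cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	txfd->tx_desc_addr = cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	txfd->count        = cpu_to_le32(sp->tx_threshold);
	txfd->tx_buf_addr0 = cpu_to_le32(pci_map_single(sp->pdev, skb->data,
	                                                skb->len, PCI_DMA_TODEVICE));
	txfd->tx_buf_size0 = cpu_to_le32(skb->len);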
215
216This ring structure is used for all normal transmit packets, but the
217transmit packet descriptors aren't long enough for most non-Tx commands such
218as CmdConfigure. This is complicated by the possibility that the chip has
219already loaded the link address in the previous descriptor. So for these
220commands we convert the next free descriptor on the ring to a NoOp, and point
221that descriptor's link to the complex command.
222
223An additional complexity of these non-transmit commands is that they may be
224added asynchronously to the normal transmit queue, so we disable interrupts
225whenever the Tx descriptor ring is manipulated.
226
227A notable aspect of these special configure commands is that they do
228work with the normal Tx ring entry scavenge method. The Tx ring scavenge
229is done at interrupt time using the 'dirty_tx' index, and checking for the
230command-complete bit. The setup frames may have the NoOp command on the
231Tx ring marked as complete before the setup command itself has completed, but
232this is not a problem. The tx_ring entry can still be safely reused, as the
233tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
234
235Commands may have bits set e.g. CmdSuspend in the command word to either
236suspend or stop the transmit/command unit. This driver always flags the last
237command with CmdSuspend, erases the CmdSuspend in the previous command, and
238then issues a CU_RESUME.
239Note: Watch out for the potential race condition here: imagine
240 erasing the previous suspend
241 the chip processes the previous command
242 the chip processes the final command, and suspends
243 doing the CU_RESUME
244	the chip processes the next, not-yet-valid, post-final command.
245So blindly sending a CU_RESUME is only safe if we do it immediately
246after erasing the previous CmdSuspend, without the possibility of an
247intervening delay. Thus the resume command is always within the
248interrupts-disabled region. This is a timing dependence, but handling this
249condition in a timing-independent way would considerably complicate the code.
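Concretely, the resume sequence (a sketch of the tail of speedo_start_xmit()
below) keeps clear_suspend() and CU_RESUME back to back inside the locked,
interrupts-disabled region:

	spin_lock_irqsave(&sp->lock, flags);
	... queue the new command with CmdSuspend set ...
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);
	iowrite8(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
	spin_unlock_irqrestore(&sp->lock, flags);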
250
251Note: In previous generation Intel chips, restarting the command unit was a
252notoriously slow process. This is presumably no longer true.
253
254IIIC. Receive structure
255
256Because of the bus-master support on the Speedo3 this driver uses the new
257SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
258This scheme allocates full-sized skbuffs as receive buffers. The value
259SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
260trade-off the memory wasted by passing the full-sized skbuff to the queue
261layer for all frames vs. the copying cost of copying a frame to a
262correctly-sized skbuff.
263
264For small frames the copying cost is negligible (esp. considering that we
265are pre-loading the cache with immediately useful header information), so we
266allocate a new, minimally-sized skbuff. For large frames the copying cost
267is non-trivial, and the larger copy might flush the cache of useful data, so
268we pass up the skbuff the packet was received into.
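Roughly, the receive path therefore does (a sketch of the copybreak decision;
the details live in speedo_rx() further down):

	if (pkt_len < rx_copybreak
	    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	align the IP header
		... copy pkt_len bytes into the small skb, keep the ring buffer ...
	} else {
		... unmap the full-sized ring skb, pass it up, and leave the
		    ring slot to be refilled later ...
	}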
269
270IV. Notes
271
272Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
273that stated that I could disclose the information. But I still resent
274having to sign an Intel NDA when I'm helping Intel sell their own product!
275
276*/
277
278static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
280/* Offsets to the various registers.
281 All accesses need not be longword aligned. */
282enum speedo_offsets {
283 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
284 SCBIntmask = 3,
285 SCBPointer = 4, /* General purpose pointer. */
286 SCBPort = 8, /* Misc. commands and operands. */
287 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288 SCBCtrlMDI = 16, /* MDI interface control. */
289 SCBEarlyRx = 20, /* Early receive byte count. */
290};
291/* Commands that can be put in a command list entry. */
292enum commands {
293 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
294 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
295 CmdDump = 0x60000, CmdDiagnose = 0x70000,
296 CmdSuspend = 0x40000000, /* Suspend after completion. */
297 CmdIntr = 0x20000000, /* Interrupt after completion. */
298 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
299};
300/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
301 status bits. Previous driver versions used separate 16 bit fields for
302 commands and statuses. --SAW
303 */
304#if defined(__alpha__)
305# define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
306#else
307# if defined(__LITTLE_ENDIAN)
308# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
309# elif defined(__BIG_ENDIAN)
310# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
311# else
312# error Unsupported byteorder
313# endif
314#endif
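/* In other words: cmd_status is stored in little-endian (device) order, so bit
   30 (CmdSuspend) lives in the fourth byte. Reading bytes 2-3 as a 16-bit
   halfword puts that bit at 0x4000 on a little-endian CPU but at 0x0040 on a
   big-endian one, which is what the two masks above encode. */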
315
316enum SCBCmdBits {
317 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
318 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
319 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
320 /* The rest are Rx and Tx commands. */
321 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
322 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
323 CUDumpStats=0x0070, /* Dump then reset stats counters. */
324 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
325 RxResumeNoResources=0x0007,
326};
327
328enum SCBPort_cmds {
329 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
330};
331
332/* The Speedo3 Rx and Tx frame/buffer descriptors. */
333struct descriptor { /* A generic descriptor. */
334 volatile s32 cmd_status; /* All command and status fields. */
335 u32 link; /* struct descriptor * */
336 unsigned char params[0];
337};
338
339/* The Speedo3 Rx and Tx buffer descriptors. */
340struct RxFD { /* Receive frame descriptor. */
341 volatile s32 status;
342 u32 link; /* struct RxFD * */
343 u32 rx_buf_addr; /* void * */
344 u32 count;
345} RxFD_ALIGNMENT;
346
347/* Selected elements of the Tx/RxFD.status word. */
348enum RxFD_bits {
349 RxComplete=0x8000, RxOK=0x2000,
350 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
351 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
352 TxUnderrun=0x1000, StatusComplete=0x8000,
353};
354
355#define CONFIG_DATA_SIZE 22
356struct TxFD { /* Transmit frame descriptor set. */
357 s32 status;
358 u32 link; /* void * */
359 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
360 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
361 /* This constitutes two "TBD" entries -- we only use one. */
362#define TX_DESCR_BUF_OFFSET 16
363 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
364 s32 tx_buf_size0; /* Length of Tx frame. */
365 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
366 s32 tx_buf_size1; /* Length of Tx frame. */
367 /* the structure must have space for at least CONFIG_DATA_SIZE starting
368 * from tx_desc_addr field */
369};
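/* Sanity check: tx_desc_addr through tx_buf_size1 is 6 * 4 = 24 bytes, which
   indeed covers the CONFIG_DATA_SIZE (22) bytes a CmdConfigure needs. */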
370
371/* Multicast filter setting block. --SAW */
372struct speedo_mc_block {
373 struct speedo_mc_block *next;
374 unsigned int tx;
375 dma_addr_t frame_dma;
376 unsigned int len;
377 struct descriptor frame __attribute__ ((__aligned__(16)));
378};
379
380/* Elements of the dump_statistics block. This block must be lword aligned. */
381struct speedo_stats {
382 u32 tx_good_frames;
383 u32 tx_coll16_errs;
384 u32 tx_late_colls;
385 u32 tx_underruns;
386 u32 tx_lost_carrier;
387 u32 tx_deferred;
388 u32 tx_one_colls;
389 u32 tx_multi_colls;
390 u32 tx_total_colls;
391 u32 rx_good_frames;
392 u32 rx_crc_errs;
393 u32 rx_align_errs;
394 u32 rx_resource_errs;
395 u32 rx_overrun_errs;
396 u32 rx_colls_errs;
397 u32 rx_runt_errs;
398 u32 done_marker;
399};
400
401enum Rx_ring_state_bits {
402 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
403};
404
405/* Do not change the position (alignment) of the first few elements!
406 The later elements are grouped for cache locality.
407
408   Unfortunately, all the positions have been shifted since then.
409 A new re-alignment is required. 2000/03/06 SAW */
410struct speedo_private {
411 void __iomem *regs;
412 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
413 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
414 /* The addresses of a Tx/Rx-in-place packets/buffers. */
415 struct sk_buff *tx_skbuff[TX_RING_SIZE];
416 struct sk_buff *rx_skbuff[RX_RING_SIZE];
417 /* Mapped addresses of the rings. */
418 dma_addr_t tx_ring_dma;
419#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
420 dma_addr_t rx_ring_dma[RX_RING_SIZE];
421 struct descriptor *last_cmd; /* Last command sent. */
422 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
423 spinlock_t lock; /* Group with Tx control cache line. */
424 u32 tx_threshold; /* The value for txdesc.count. */
425 struct RxFD *last_rxf; /* Last filled RX buffer. */
426 dma_addr_t last_rxf_dma;
427 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
428 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
429 struct net_device_stats stats;
430 struct speedo_stats *lstats;
431 dma_addr_t lstats_dma;
432 int chip_id;
433 struct pci_dev *pdev;
434 struct timer_list timer; /* Media selection timer. */
435 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
436 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
437 long in_interrupt; /* Word-aligned dev->interrupt */
438 unsigned char acpi_pwr;
439 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
440 unsigned int tx_full:1; /* The Tx queue is full. */
441 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
442 unsigned int rx_bug:1; /* Work around receiver hang errata. */
443 unsigned char default_port:8; /* Last dev->if_port value. */
444 unsigned char rx_ring_state; /* RX ring status flags. */
445 unsigned short phy[2]; /* PHY media interfaces available. */
446 unsigned short partner; /* Link partner caps. */
447 struct mii_if_info mii_if; /* MII API hooks, info */
448 u32 msg_enable; /* debug message level */
449};
450
451/* The parameters for a CmdConfigure operation.
452 There are so many options that it would be difficult to document each bit.
453 We mostly use the default or recommended settings. */
454static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
455 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
456 0, 0x2E, 0, 0x60, 0,
457 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
458 0x3f, 0x05, };
459static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
460 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
461 0, 0x2E, 0, 0x60, 0x08, 0x88,
462 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
463 0x31, 0x05, };
464
465/* PHY media interface chips. */
466static const char * const phys[] = {
467	"None", "i82553-A/B", "i82553-C", "i82503",
468 "DP83840", "80c240", "80c24", "i82555",
469 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
470 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
471enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
472 S80C24, I82555, DP83840A=10, };
473static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
474#define EE_READ_CMD (6)
475
476static int eepro100_init_one(struct pci_dev *pdev,
477 const struct pci_device_id *ent);
478
479static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
480static int mdio_read(struct net_device *dev, int phy_id, int location);
481static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
482static int speedo_open(struct net_device *dev);
483static void speedo_resume(struct net_device *dev);
484static void speedo_timer(unsigned long data);
485static void speedo_init_rx_ring(struct net_device *dev);
486static void speedo_tx_timeout(struct net_device *dev);
487static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
488static void speedo_refill_rx_buffers(struct net_device *dev, int force);
489static int speedo_rx(struct net_device *dev);
490static void speedo_tx_buffer_gc(struct net_device *dev);
491static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
492static int speedo_close(struct net_device *dev);
493static struct net_device_stats *speedo_get_stats(struct net_device *dev);
494static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
495static void set_rx_mode(struct net_device *dev);
496static void speedo_show_state(struct net_device *dev);
497static struct ethtool_ops ethtool_ops;
498
499
500
501#ifdef honor_default_port
502/* Optional driver feature to allow forcing the transceiver setting.
503 Not recommended. */
504static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
505 0x2000, 0x2100, 0x0400, 0x3100};
506#endif
507
508/* How to wait for the command unit to accept a command.
509 Typically this takes 0 ticks. */
510static inline unsigned char wait_for_cmd_done(struct net_device *dev,
511 struct speedo_private *sp)
512{
513 int wait = 1000;
514 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
515 unsigned char r;
516
517 do {
518 udelay(1);
519 r = ioread8(cmd_ioaddr);
520 } while(r && --wait >= 0);
521
522 if (wait < 0)
523 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
524 return r;
525}
526
527static int __devinit eepro100_init_one (struct pci_dev *pdev,
528 const struct pci_device_id *ent)
529{
530 void __iomem *ioaddr;
531 int irq, pci_bar;
532 int acpi_idle_state = 0, pm;
533 static int cards_found /* = 0 */;
534 unsigned long pci_base;
535
536#ifndef MODULE
537 /* when built-in, we only print version if device is found */
538 static int did_version;
539 if (did_version++ == 0)
540 printk(version);
541#endif
542
543 /* save power state before pci_enable_device overwrites it */
544 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
545 if (pm) {
546 u16 pwr_command;
547 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
548 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
549 }
550
551 if (pci_enable_device(pdev))
552 goto err_out_free_mmio_region;
553
554 pci_set_master(pdev);
555
556 if (!request_region(pci_resource_start(pdev, 1),
557 pci_resource_len(pdev, 1), "eepro100")) {
558		dev_printk (KERN_ERR, &pdev->dev,
559 "eepro100: cannot reserve I/O ports\n");
560		goto err_out_none;
561 }
562 if (!request_mem_region(pci_resource_start(pdev, 0),
563 pci_resource_len(pdev, 0), "eepro100")) {
564		dev_printk (KERN_ERR, &pdev->dev,
565 "eepro100: cannot reserve MMIO region\n");
566		goto err_out_free_pio_region;
567 }
568
569 irq = pdev->irq;
570 pci_bar = use_io ? 1 : 0;
571 pci_base = pci_resource_start(pdev, pci_bar);
572 if (DEBUG & NETIF_MSG_PROBE)
573 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
574 pci_base, irq);
575
576 ioaddr = pci_iomap(pdev, pci_bar, 0);
577 if (!ioaddr) {
578		dev_printk (KERN_ERR, &pdev->dev, "eepro100: cannot remap IO\n");
579		goto err_out_free_mmio_region;
580 }
581
582 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
583 cards_found++;
584 else
585 goto err_out_iounmap;
586
587 return 0;
588
589err_out_iounmap: ;
590 pci_iounmap(pdev, ioaddr);
591err_out_free_mmio_region:
592 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
593err_out_free_pio_region:
594 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
595err_out_none:
596 return -ENODEV;
597}
598
599#ifdef CONFIG_NET_POLL_CONTROLLER
600/*
601 * Polling 'interrupt' - used by things like netconsole to send skbs
602 * without having to re-enable interrupts. It's not called while
603 * the interrupt routine is executing.
604 */
605
606static void poll_speedo (struct net_device *dev)
607{
608 /* disable_irq is not very nice, but with the funny lockless design
609 we have no other choice. */
610 disable_irq(dev->irq);
611 speedo_interrupt (dev->irq, dev, NULL);
612 enable_irq(dev->irq);
613}
614#endif
615
616static int __devinit speedo_found1(struct pci_dev *pdev,
617 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
618{
619 struct net_device *dev;
620 struct speedo_private *sp;
621 const char *product;
622 int i, option;
623 u16 eeprom[0x100];
624 int size;
625 void *tx_ring_space;
626 dma_addr_t tx_ring_dma;
627
628 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
629 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
630 if (tx_ring_space == NULL)
631 return -1;
632
633 dev = alloc_etherdev(sizeof(struct speedo_private));
634 if (dev == NULL) {
635 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
636 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
637 return -1;
638 }
639
640 SET_MODULE_OWNER(dev);
641 SET_NETDEV_DEV(dev, &pdev->dev);
642
643 if (dev->mem_start > 0)
644 option = dev->mem_start;
645 else if (card_idx >= 0 && options[card_idx] >= 0)
646 option = options[card_idx];
647 else
648 option = 0;
649
650 rtnl_lock();
651 if (dev_alloc_name(dev, dev->name) < 0)
652 goto err_free_unlock;
653
654 /* Read the station address EEPROM before doing the reset.
655	   Nominally this should even be done before accepting the device, but
656 then we wouldn't have a device name with which to report the error.
657 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
658 */
659 {
660 void __iomem *iobase;
661 int read_cmd, ee_size;
662 u16 sum;
663 int j;
664
665 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
666 requirements. */
667 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
668 if (!iobase)
669 goto err_free_unlock;
670 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
671 == 0xffe0000) {
672 ee_size = 0x100;
673 read_cmd = EE_READ_CMD << 24;
674 } else {
675 ee_size = 0x40;
676 read_cmd = EE_READ_CMD << 22;
677 }
678
679 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
680 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
681 eeprom[i] = value;
682 sum += value;
683 if (i < 3) {
684 dev->dev_addr[j++] = value;
685 dev->dev_addr[j++] = value >> 8;
686 }
687 }
688 if (sum != 0xBABA)
689 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
690 "check settings before activating this device!\n",
691 dev->name, sum);
692 /* Don't unregister_netdev(dev); as the EEPro may actually be
693 usable, especially if the MAC address is set later.
694 On the other hand, it may be unusable if MDI data is corrupted. */
695
696 pci_iounmap(pdev, iobase);
697 }
698
699 /* Reset the chip: stop Tx and Rx processes and clear counters.
700 This takes less than 10usec and will easily finish before the next
701 action. */
702 iowrite32(PortReset, ioaddr + SCBPort);
703 ioread32(ioaddr + SCBPort);
704 udelay(10);
705
706 if (eeprom[3] & 0x0100)
707 product = "OEM i82557/i82558 10/100 Ethernet";
708 else
709 product = pci_name(pdev);
710
711 printk(KERN_INFO "%s: %s, ", dev->name, product);
712
713 for (i = 0; i < 5; i++)
714 printk("%2.2X:", dev->dev_addr[i]);
715 printk("%2.2X, ", dev->dev_addr[i]);
716 printk("IRQ %d.\n", pdev->irq);
717
718 sp = netdev_priv(dev);
719
720 /* we must initialize this early, for mdio_{read,write} */
721 sp->regs = ioaddr;
722
723#if 1 || defined(kernel_bloat)
724 /* OK, this is pure kernel bloat. I don't like it when other drivers
725 waste non-pageable kernel space to emit similar messages, but I need
726 them for bug reports. */
727 {
728 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
729 /* The self-test results must be paragraph aligned. */
730 volatile s32 *self_test_results;
731		int boguscnt = 16000;		/* Timeout for self-test. */
732 if ((eeprom[3] & 0x03) != 0x03)
733 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
734 " work-around.\n");
735 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
736 " connectors present:",
737 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
738 for (i = 0; i < 4; i++)
739 if (eeprom[5] & (1<<i))
740 printk(connectors[i]);
741 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
742 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
743 if (eeprom[7] & 0x0700)
744 printk(KERN_INFO " Secondary interface chip %s.\n",
745 phys[(eeprom[7]>>8)&7]);
746 if (((eeprom[6]>>8) & 0x3f) == DP83840
747 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
748 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
749 if (congenb)
750 mdi_reg23 |= 0x0100;
751 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
752 mdi_reg23);
753 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
754 }
755 if ((option >= 0) && (option & 0x70)) {
756 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
757 (option & 0x20 ? 100 : 10),
758 (option & 0x10 ? "full" : "half"));
759 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
760 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
761 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
762 }
763
764 /* Perform a system self-test. */
765 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
766 self_test_results[0] = 0;
767 self_test_results[1] = -1;
768 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
769 do {
770 udelay(10);
771 } while (self_test_results[1] == -1 && --boguscnt >= 0);
772
773 if (boguscnt < 0) { /* Test optimized out. */
774 printk(KERN_ERR "Self test failed, status %8.8x:\n"
775 KERN_ERR " Failure to initialize the i82557.\n"
776 KERN_ERR " Verify that the card is a bus-master"
777 " capable slot.\n",
778 self_test_results[1]);
779 } else
780 printk(KERN_INFO " General self-test: %s.\n"
781 KERN_INFO " Serial sub-system self-test: %s.\n"
782 KERN_INFO " Internal registers self-test: %s.\n"
783 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
784 self_test_results[1] & 0x1000 ? "failed" : "passed",
785 self_test_results[1] & 0x0020 ? "failed" : "passed",
786 self_test_results[1] & 0x0008 ? "failed" : "passed",
787 self_test_results[1] & 0x0004 ? "failed" : "passed",
788 self_test_results[0]);
789 }
790#endif /* kernel_bloat */
791
792 iowrite32(PortReset, ioaddr + SCBPort);
793 ioread32(ioaddr + SCBPort);
794 udelay(10);
795
796 /* Return the chip to its original power state. */
797 pci_set_power_state(pdev, acpi_idle_state);
798
799 pci_set_drvdata (pdev, dev);
800 SET_NETDEV_DEV(dev, &pdev->dev);
801
802 dev->irq = pdev->irq;
803
804 sp->pdev = pdev;
805 sp->msg_enable = DEBUG;
806 sp->acpi_pwr = acpi_idle_state;
807 sp->tx_ring = tx_ring_space;
808 sp->tx_ring_dma = tx_ring_dma;
809 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
810 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
811 init_timer(&sp->timer); /* used in ioctl() */
812 spin_lock_init(&sp->lock);
813
814 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
815 if (card_idx >= 0) {
816 if (full_duplex[card_idx] >= 0)
817 sp->mii_if.full_duplex = full_duplex[card_idx];
818 }
819 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
820
821 sp->phy[0] = eeprom[6];
822 sp->phy[1] = eeprom[7];
823
824 sp->mii_if.phy_id = eeprom[6] & 0x1f;
825 sp->mii_if.phy_id_mask = 0x1f;
826 sp->mii_if.reg_num_mask = 0x1f;
827 sp->mii_if.dev = dev;
828 sp->mii_if.mdio_read = mdio_read;
829 sp->mii_if.mdio_write = mdio_write;
830
831 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
832 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
833 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
834 || (pdev->device == 0x245D)) {
835 sp->chip_id = 1;
836 }
837
838 if (sp->rx_bug)
839 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
840
841 /* The Speedo-specific entries in the device structure. */
842 dev->open = &speedo_open;
843 dev->hard_start_xmit = &speedo_start_xmit;
844 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
845 dev->stop = &speedo_close;
846 dev->get_stats = &speedo_get_stats;
847 dev->set_multicast_list = &set_rx_mode;
848 dev->do_ioctl = &speedo_ioctl;
849 SET_ETHTOOL_OPS(dev, &ethtool_ops);
850#ifdef CONFIG_NET_POLL_CONTROLLER
851 dev->poll_controller = &poll_speedo;
852#endif
853
854 if (register_netdevice(dev))
855 goto err_free_unlock;
856 rtnl_unlock();
857
858 return 0;
859
860 err_free_unlock:
861 rtnl_unlock();
862 free_netdev(dev);
863 return -1;
864}
865
866static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
867{
868 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
869 int wait = 0;
870 do
871 if (ioread8(cmd_ioaddr) == 0) break;
872 while(++wait <= 200);
873 if (wait > 100)
874 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
875 ioread8(cmd_ioaddr), wait);
876
877 iowrite8(cmd, cmd_ioaddr);
878
879 for (wait = 0; wait <= 100; wait++)
880 if (ioread8(cmd_ioaddr) == 0) return;
881 for (; wait <= 20000; wait++)
882 if (ioread8(cmd_ioaddr) == 0) return;
883 else udelay(1);
884 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
885 " Current status %8.8x.\n",
886 cmd, wait, ioread32(sp->regs + SCBStatus));
887}
888
889/* Serial EEPROM section.
890 A "bit" grungy, but we work our way through bit-by-bit :->. */
891/* EEPROM_Ctrl bits. */
892#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
893#define EE_CS 0x02 /* EEPROM chip select. */
894#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
895#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
896#define EE_ENB (0x4800 | EE_CS)
897#define EE_WRITE_0 0x4802
898#define EE_WRITE_1 0x4806
899#define EE_OFFSET SCBeeprom
900
901/* The fixes for the code were kindly provided by Dragan Stancevic
902 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
903 access timing.
904 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
905   interval for serial EEPROM.  However, it looks like there is an
906 additional requirement dictating larger udelay's in the code below.
907 2000/05/24 SAW */
908static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
909{
910 unsigned retval = 0;
911 void __iomem *ee_addr = ioaddr + SCBeeprom;
912
913 iowrite16(EE_ENB, ee_addr); udelay(2);
914 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
915
916 /* Shift the command bits out. */
917 do {
918 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
919 iowrite16(dataval, ee_addr); udelay(2);
920 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
921 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
922 } while (--cmd_len >= 0);
923 iowrite16(EE_ENB, ee_addr); udelay(2);
924
925 /* Terminate the EEPROM access. */
926 iowrite16(EE_ENB & ~EE_CS, ee_addr);
927 return retval;
928}
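/* Example use (see speedo_found1() above): EEPROM word <addr> is read with
   do_eeprom_cmd(ioaddr, read_cmd | (addr << 16), 27), where read_cmd places
   the EE_READ_CMD opcode above a 6- or 8-bit address field and the low 16
   clocked bits return the data. */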
929
930static int mdio_read(struct net_device *dev, int phy_id, int location)
931{
932 struct speedo_private *sp = netdev_priv(dev);
933 void __iomem *ioaddr = sp->regs;
934 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
935 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
936 do {
937 val = ioread32(ioaddr + SCBCtrlMDI);
938 if (--boguscnt < 0) {
939 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
940 break;
941 }
942 } while (! (val & 0x10000000));
943 return val & 0xffff;
944}
945
946static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
947{
948 struct speedo_private *sp = netdev_priv(dev);
949 void __iomem *ioaddr = sp->regs;
950 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
951 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
952 ioaddr + SCBCtrlMDI);
953 do {
954 val = ioread32(ioaddr + SCBCtrlMDI);
955 if (--boguscnt < 0) {
956 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
957 break;
958 }
959 } while (! (val & 0x10000000));
960}
961
962static int
963speedo_open(struct net_device *dev)
964{
965 struct speedo_private *sp = netdev_priv(dev);
966 void __iomem *ioaddr = sp->regs;
967 int retval;
968
969 if (netif_msg_ifup(sp))
970 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
971
972 pci_set_power_state(sp->pdev, PCI_D0);
973
974 /* Set up the Tx queue early.. */
975 sp->cur_tx = 0;
976 sp->dirty_tx = 0;
977 sp->last_cmd = NULL;
978 sp->tx_full = 0;
979 sp->in_interrupt = 0;
980
981 /* .. we can safely take handler calls during init. */
982	retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
983	if (retval) {
984 return retval;
985 }
986
987 dev->if_port = sp->default_port;
988
989#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
990 /* Retrigger negotiation to reset previous errors. */
991 if ((sp->phy[0] & 0x8000) == 0) {
992 int phy_addr = sp->phy[0] & 0x1f ;
993 /* Use 0x3300 for restarting NWay, other values to force xcvr:
994 0x0000 10-HD
995 0x0100 10-FD
996 0x2000 100-HD
997 0x2100 100-FD
998 */
999#ifdef honor_default_port
1000 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1001#else
1002 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1003#endif
1004 }
1005#endif
1006
1007 speedo_init_rx_ring(dev);
1008
1009 /* Fire up the hardware. */
1010 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1011 speedo_resume(dev);
1012
1013 netdevice_start(dev);
1014 netif_start_queue(dev);
1015
1016 /* Setup the chip and configure the multicast list. */
1017 sp->mc_setup_head = NULL;
1018 sp->mc_setup_tail = NULL;
1019 sp->flow_ctrl = sp->partner = 0;
1020 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1021 set_rx_mode(dev);
1022 if ((sp->phy[0] & 0x8000) == 0)
1023 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1024
1025 mii_check_link(&sp->mii_if);
1026
1027 if (netif_msg_ifup(sp)) {
1028 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1029 dev->name, ioread16(ioaddr + SCBStatus));
1030 }
1031
1032 /* Set the timer. The timer serves a dual purpose:
1033 1) to monitor the media interface (e.g. link beat) and perhaps switch
1034 to an alternate media type
1035 2) to monitor Rx activity, and restart the Rx process if the receiver
1036 hangs. */
1037 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1038 sp->timer.data = (unsigned long)dev;
1039 sp->timer.function = &speedo_timer; /* timer handler */
1040 add_timer(&sp->timer);
1041
1042 /* No need to wait for the command unit to accept here. */
1043 if ((sp->phy[0] & 0x8000) == 0)
1044 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1045
1046 return 0;
1047}
1048
1049/* Start the chip hardware after a full reset. */
1050static void speedo_resume(struct net_device *dev)
1051{
1052 struct speedo_private *sp = netdev_priv(dev);
1053 void __iomem *ioaddr = sp->regs;
1054
1055 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1056 sp->tx_threshold = 0x01208000;
1057
1058 /* Set the segment registers to '0'. */
1059 if (wait_for_cmd_done(dev, sp) != 0) {
1060 iowrite32(PortPartialReset, ioaddr + SCBPort);
1061 udelay(10);
1062 }
1063
1064 iowrite32(0, ioaddr + SCBPointer);
1065 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1066 udelay(10); /* Bogus, but it avoids the bug. */
1067
1068 /* Note: these next two operations can take a while. */
1069 do_slow_command(dev, sp, RxAddrLoad);
1070 do_slow_command(dev, sp, CUCmdBase);
1071
1072 /* Load the statistics block and rx ring addresses. */
1073 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1074 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1075
1076 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1077 sp->lstats->done_marker = 0;
1078 wait_for_cmd_done(dev, sp);
1079
1080 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1081 if (netif_msg_rx_err(sp))
1082 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1083 dev->name);
1084 } else {
1085 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1086 ioaddr + SCBPointer);
1087 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1088 }
1089
1090 /* Note: RxStart should complete instantly. */
1091 do_slow_command(dev, sp, RxStart);
1092 do_slow_command(dev, sp, CUDumpStats);
1093
1094 /* Fill the first command with our physical address. */
1095 {
1096 struct descriptor *ias_cmd;
1097
1098 ias_cmd =
1099 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1100 /* Avoid a bug(?!) here by marking the command already completed. */
1101 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1102 ias_cmd->link =
1103 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1104 memcpy(ias_cmd->params, dev->dev_addr, 6);
1105 if (sp->last_cmd)
1106 clear_suspend(sp->last_cmd);
1107 sp->last_cmd = ias_cmd;
1108 }
1109
1110 /* Start the chip's Tx process and unmask interrupts. */
1111 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1112 ioaddr + SCBPointer);
1113 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1114 remain masked --Dragan */
1115 iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1116}
1117
1118/*
1119 * Sometimes the receiver stops making progress. This routine knows how to
1120 * get it going again, without losing packets or being otherwise nasty like
1121 * a chip reset would be. Previously the driver had a whole sequence
1122 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1123 * do another, etc. But those things don't really matter. Separate logic
1124 * in the ISR provides for allocating buffers--the other half of operation
1125 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1126 * The problem with the old, more involved algorithm showed up under
1127 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1128 */
1129static void
1130speedo_rx_soft_reset(struct net_device *dev)
1131{
1132 struct speedo_private *sp = netdev_priv(dev);
1133 struct RxFD *rfd;
1134 void __iomem *ioaddr;
1135
1136 ioaddr = sp->regs;
1137 if (wait_for_cmd_done(dev, sp) != 0) {
1138 printk("%s: previous command stalled\n", dev->name);
1139 return;
1140 }
1141 /*
1142 * Put the hardware into a known state.
1143 */
1144 iowrite8(RxAbort, ioaddr + SCBCmd);
1145
1146 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1147
1148 rfd->rx_buf_addr = 0xffffffff;
1149
1150 if (wait_for_cmd_done(dev, sp) != 0) {
1151 printk("%s: RxAbort command stalled\n", dev->name);
1152 return;
1153 }
1154 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1155 ioaddr + SCBPointer);
1156 iowrite8(RxStart, ioaddr + SCBCmd);
1157}
1158
1159
1160/* Media monitoring and control. */
1161static void speedo_timer(unsigned long data)
1162{
1163 struct net_device *dev = (struct net_device *)data;
1164 struct speedo_private *sp = netdev_priv(dev);
1165 void __iomem *ioaddr = sp->regs;
1166 int phy_num = sp->phy[0] & 0x1f;
1167
1168 /* We have MII and lost link beat. */
1169 if ((sp->phy[0] & 0x8000) == 0) {
1170 int partner = mdio_read(dev, phy_num, MII_LPA);
1171 if (partner != sp->partner) {
1172 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1173 if (netif_msg_link(sp)) {
1174 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1175 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1176 dev->name, sp->partner, partner, sp->mii_if.advertising);
1177 }
1178 sp->partner = partner;
1179 if (flow_ctrl != sp->flow_ctrl) {
1180 sp->flow_ctrl = flow_ctrl;
1181 sp->rx_mode = -1; /* Trigger a reload. */
1182 }
1183 }
1184 }
1185 mii_check_link(&sp->mii_if);
1186 if (netif_msg_timer(sp)) {
1187 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1188 dev->name, ioread16(ioaddr + SCBStatus));
1189 }
1190 if (sp->rx_mode < 0 ||
1191 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1192 /* We haven't received a packet in a Long Time. We might have been
1193 bitten by the receiver hang bug. This can be cleared by sending
1194 a set multicast list command. */
1195 if (netif_msg_timer(sp))
1196 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1197 " from a timer routine,"
1198 " m=%d, j=%ld, l=%ld.\n",
1199 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1200 set_rx_mode(dev);
1201 }
1202 /* We must continue to monitor the media. */
1203 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1204 add_timer(&sp->timer);
1205}
1206
1207static void speedo_show_state(struct net_device *dev)
1208{
1209 struct speedo_private *sp = netdev_priv(dev);
1210 int i;
1211
1212 if (netif_msg_pktdata(sp)) {
1213 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1214 dev->name, sp->cur_tx, sp->dirty_tx);
1215 for (i = 0; i < TX_RING_SIZE; i++)
1216 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1217 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1218 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1219 i, sp->tx_ring[i].status);
1220
1221 printk(KERN_DEBUG "%s: Printing Rx ring"
1222 " (next to receive into %u, dirty index %u).\n",
1223 dev->name, sp->cur_rx, sp->dirty_rx);
1224 for (i = 0; i < RX_RING_SIZE; i++)
1225 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1226 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1227 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1228 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1229 i, (sp->rx_ringp[i] != NULL) ?
1230 (unsigned)sp->rx_ringp[i]->status : 0);
1231 }
1232
1233#if 0
1234 {
1235 void __iomem *ioaddr = sp->regs;
1236 int phy_num = sp->phy[0] & 0x1f;
1237 for (i = 0; i < 16; i++) {
1238 /* FIXME: what does it mean? --SAW */
1239 if (i == 6) i = 21;
1240 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1241 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1242 }
1243 }
1244#endif
1245
1246}
1247
1248/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1249static void
1250speedo_init_rx_ring(struct net_device *dev)
1251{
1252 struct speedo_private *sp = netdev_priv(dev);
1253 struct RxFD *rxf, *last_rxf = NULL;
1254 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1255 int i;
1256
1257 sp->cur_rx = 0;
1258
1259 for (i = 0; i < RX_RING_SIZE; i++) {
1260 struct sk_buff *skb;
1261 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1262		if (skb)
1263 rx_align(skb); /* Align IP on 16 byte boundary */
1264		sp->rx_skbuff[i] = skb;
1265 if (skb == NULL)
1266 break; /* OK. Just initially short of Rx bufs. */
1267 skb->dev = dev; /* Mark as being used by this device. */
1268		rxf = (struct RxFD *)skb->data;
1269		sp->rx_ringp[i] = rxf;
1270 sp->rx_ring_dma[i] =
1271 pci_map_single(sp->pdev, rxf,
1272 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1273 skb_reserve(skb, sizeof(struct RxFD));
1274 if (last_rxf) {
1275 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1276 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1277 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1278 }
1279 last_rxf = rxf;
1280 last_rxf_dma = sp->rx_ring_dma[i];
1281 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1282 rxf->link = 0; /* None yet. */
1283 /* This field unused by i82557. */
1284 rxf->rx_buf_addr = 0xffffffff;
1285 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1286 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1287 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1288 }
1289 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1290 /* Mark the last entry as end-of-list. */
1291 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1292 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1293 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1294 sp->last_rxf = last_rxf;
1295 sp->last_rxf_dma = last_rxf_dma;
1296}
1297
1298static void speedo_purge_tx(struct net_device *dev)
1299{
1300 struct speedo_private *sp = netdev_priv(dev);
1301 int entry;
1302
1303 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1304 entry = sp->dirty_tx % TX_RING_SIZE;
1305 if (sp->tx_skbuff[entry]) {
1306 sp->stats.tx_errors++;
1307 pci_unmap_single(sp->pdev,
1308 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1309 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1310 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1311 sp->tx_skbuff[entry] = NULL;
1312 }
1313 sp->dirty_tx++;
1314 }
1315 while (sp->mc_setup_head != NULL) {
1316 struct speedo_mc_block *t;
1317 if (netif_msg_tx_err(sp))
1318 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1319 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1320 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1321 t = sp->mc_setup_head->next;
1322 kfree(sp->mc_setup_head);
1323 sp->mc_setup_head = t;
1324 }
1325 sp->mc_setup_tail = NULL;
1326 sp->tx_full = 0;
1327 netif_wake_queue(dev);
1328}
1329
1330static void reset_mii(struct net_device *dev)
1331{
1332 struct speedo_private *sp = netdev_priv(dev);
1333
1334 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1335 if ((sp->phy[0] & 0x8000) == 0) {
1336 int phy_addr = sp->phy[0] & 0x1f;
1337 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1338 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1339 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1340 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1341 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1342 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1343#ifdef honor_default_port
1344 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1345#else
1346 mdio_read(dev, phy_addr, MII_BMCR);
1347 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1348 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1349#endif
1350 }
1351}
1352
1353static void speedo_tx_timeout(struct net_device *dev)
1354{
1355 struct speedo_private *sp = netdev_priv(dev);
1356 void __iomem *ioaddr = sp->regs;
1357 int status = ioread16(ioaddr + SCBStatus);
1358 unsigned long flags;
1359
1360 if (netif_msg_tx_err(sp)) {
1361 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1362 " %4.4x at %d/%d command %8.8x.\n",
1363 dev->name, status, ioread16(ioaddr + SCBCmd),
1364 sp->dirty_tx, sp->cur_tx,
1365 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1366
1367 }
1368 speedo_show_state(dev);
1369#if 0
1370 if ((status & 0x00C0) != 0x0080
1371 && (status & 0x003C) == 0x0010) {
1372 /* Only the command unit has stopped. */
1373 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1374 dev->name);
1375		iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1376 ioaddr + SCBPointer);
1377 iowrite16(CUStart, ioaddr + SCBCmd);
1378 reset_mii(dev);
1379 } else {
1380#else
1381 {
1382#endif
1383 del_timer_sync(&sp->timer);
1384 /* Reset the Tx and Rx units. */
1385 iowrite32(PortReset, ioaddr + SCBPort);
1386 /* We may get spurious interrupts here. But I don't think that they
1387 may do much harm. 1999/12/09 SAW */
1388 udelay(10);
1389 /* Disable interrupts. */
1390 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1391 synchronize_irq(dev->irq);
1392 speedo_tx_buffer_gc(dev);
1393 /* Free as much as possible.
1394 It helps to recover from a hang because of out-of-memory.
1395 It also simplifies speedo_resume() in case TX ring is full or
1396 close-to-be full. */
1397 speedo_purge_tx(dev);
1398 speedo_refill_rx_buffers(dev, 1);
1399 spin_lock_irqsave(&sp->lock, flags);
1400 speedo_resume(dev);
1401 sp->rx_mode = -1;
1402 dev->trans_start = jiffies;
1403 spin_unlock_irqrestore(&sp->lock, flags);
1404 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1405 /* Reset MII transceiver. Do it before starting the timer to serialize
1406		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
1407 reset_mii(dev);
1408 sp->timer.expires = RUN_AT(2*HZ);
1409 add_timer(&sp->timer);
1410 }
1411 return;
1412}
1413
1414static int
1415speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1416{
1417 struct speedo_private *sp = netdev_priv(dev);
1418 void __iomem *ioaddr = sp->regs;
1419 int entry;
1420
1421 /* Prevent interrupts from changing the Tx ring from underneath us. */
1422 unsigned long flags;
1423
1424 spin_lock_irqsave(&sp->lock, flags);
1425
1426	/* Check if there is enough space. */
1427 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1428 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1429 netif_stop_queue(dev);
1430 sp->tx_full = 1;
1431 spin_unlock_irqrestore(&sp->lock, flags);
1432 return 1;
1433 }
1434
1435 /* Calculate the Tx descriptor entry. */
1436 entry = sp->cur_tx++ % TX_RING_SIZE;
1437
1438 sp->tx_skbuff[entry] = skb;
1439 sp->tx_ring[entry].status =
1440 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1441 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1442 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1443 sp->tx_ring[entry].link =
1444 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1445 sp->tx_ring[entry].tx_desc_addr =
1446 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1447 /* The data region is always in one buffer descriptor. */
1448 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1449 sp->tx_ring[entry].tx_buf_addr0 =
1450 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1451 skb->len, PCI_DMA_TODEVICE));
1452 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1453
1454 /* workaround for hardware bug on 10 mbit half duplex */
1455
1456 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1457 wait_for_cmd_done(dev, sp);
1458 iowrite8(0 , ioaddr + SCBCmd);
1459 udelay(1);
1460 }
1461
1462 /* Trigger the command unit resume. */
1463 wait_for_cmd_done(dev, sp);
1464 clear_suspend(sp->last_cmd);
1465 /* We want the time window between clearing suspend flag on the previous
1466 command and resuming CU to be as small as possible.
1467 Interrupts in between are very undesired. --SAW */
1468 iowrite8(CUResume, ioaddr + SCBCmd);
1469 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1470
1471	/* Leave room for set_rx_mode(). If there is no more space than what is
1472	   reserved for the multicast filter, mark the ring as full. */
1473 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1474 netif_stop_queue(dev);
1475 sp->tx_full = 1;
1476 }
1477
1478 spin_unlock_irqrestore(&sp->lock, flags);
1479
1480 dev->trans_start = jiffies;
1481
1482 return 0;
1483}
1484
1485static void speedo_tx_buffer_gc(struct net_device *dev)
1486{
1487 unsigned int dirty_tx;
1488 struct speedo_private *sp = netdev_priv(dev);
1489
1490 dirty_tx = sp->dirty_tx;
1491 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1492 int entry = dirty_tx % TX_RING_SIZE;
1493 int status = le32_to_cpu(sp->tx_ring[entry].status);
1494
1495 if (netif_msg_tx_done(sp))
1496 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1497 entry, status);
1498 if ((status & StatusComplete) == 0)
1499 break; /* It still hasn't been processed. */
1500 if (status & TxUnderrun)
1501 if (sp->tx_threshold < 0x01e08000) {
1502 if (netif_msg_tx_err(sp))
1503 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1504 dev->name);
1505 sp->tx_threshold += 0x00040000;
1506 }
1507 /* Free the original skb. */
1508 if (sp->tx_skbuff[entry]) {
1509 sp->stats.tx_packets++; /* Count only user packets. */
1510 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1511 pci_unmap_single(sp->pdev,
1512 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1513 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1514 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1515 sp->tx_skbuff[entry] = NULL;
1516 }
1517 dirty_tx++;
1518 }
1519
1520 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1521 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1522 " full=%d.\n",
1523 dirty_tx, sp->cur_tx, sp->tx_full);
1524 dirty_tx += TX_RING_SIZE;
1525 }
1526
1527 while (sp->mc_setup_head != NULL
1528 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1529 struct speedo_mc_block *t;
1530 if (netif_msg_tx_err(sp))
1531 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1532 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1533 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1534 t = sp->mc_setup_head->next;
1535 kfree(sp->mc_setup_head);
1536 sp->mc_setup_head = t;
1537 }
1538 if (sp->mc_setup_head == NULL)
1539 sp->mc_setup_tail = NULL;
1540
1541 sp->dirty_tx = dirty_tx;
1542}
1543
1544/* The interrupt handler does all of the Rx thread work and cleans up
1545 after the Tx thread. */
1546static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1547{
1548 struct net_device *dev = (struct net_device *)dev_instance;
1549 struct speedo_private *sp;
1550 void __iomem *ioaddr;
1551 long boguscnt = max_interrupt_work;
1552 unsigned short status;
1553 unsigned int handled = 0;
1554
1555 sp = netdev_priv(dev);
1556 ioaddr = sp->regs;
1557
1558#ifndef final_version
1559 /* A lock to prevent simultaneous entry on SMP machines. */
1560 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1561 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1562 dev->name);
1563 sp->in_interrupt = 0; /* Avoid halting machine. */
1564 return IRQ_NONE;
1565 }
1566#endif
1567
1568 do {
1569 status = ioread16(ioaddr + SCBStatus);
1570 /* Acknowledge all of the current interrupt sources ASAP. */
1571 /* Will change from 0xfc00 to 0xff00 when we start handling
1572 FCP and ER interrupts --Dragan */
1573 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1574
1575 if (netif_msg_intr(sp))
1576 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1577 dev->name, status);
1578
1579 if ((status & 0xfc00) == 0)
1580 break;
1581 handled = 1;
1582
1583
1584 if ((status & 0x5000) || /* Packet received, or Rx error. */
1585 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1586 /* Need to gather the postponed packet. */
1587 speedo_rx(dev);
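		/* RrPostponed is set when speedo_rx() had to leave the last filled
		   RxFD untouched (out-of-buffers corner case); that packet is picked
		   up on a later pass once more RxFDs are available. */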
1588
1589 /* Always check if all rx buffers are allocated. --SAW */
1590 speedo_refill_rx_buffers(dev, 0);
1591
1592 spin_lock(&sp->lock);
1593 /*
1594 * The chip may have suspended reception for various reasons.
1595 * Check for that, and re-prime it should this be the case.
1596 */
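		/* Bits 2-5 of SCBStatus encode the Receive Unit state; any suspended
		   or out-of-resources state gets the RU restarted below. */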
1597 switch ((status >> 2) & 0xf) {
1598 case 0: /* Idle */
1599 break;
1600 case 1: /* Suspended */
1601 case 2: /* No resources (RxFDs) */
1602 case 9: /* Suspended with no more RBDs */
1603 case 10: /* No resources due to no RBDs */
1604 case 12: /* Ready with no RBDs */
1605 speedo_rx_soft_reset(dev);
1606 break;
1607 case 3: case 5: case 6: case 7: case 8:
1608 case 11: case 13: case 14: case 15:
1609 /* these are all reserved values */
1610 break;
1611 }
1612
1613
1614 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1615 if (status & 0xA400) {
1616 speedo_tx_buffer_gc(dev);
1617 if (sp->tx_full
1618 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1619 /* The ring is no longer full. */
1620 sp->tx_full = 0;
1621 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1622 }
1623 }
1624
1625 spin_unlock(&sp->lock);
1626
1627 if (--boguscnt < 0) {
1628 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1629 dev->name, status);
1630 /* Clear all interrupt sources. */
1631 /* Will change from 0xfc00 to 0xff00 when we start handling
1632 FCP and ER interrupts --Dragan */
1633 iowrite16(0xfc00, ioaddr + SCBStatus);
1634 break;
1635 }
1636 } while (1);
1637
1638 if (netif_msg_intr(sp))
1639 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1640 dev->name, ioread16(ioaddr + SCBStatus));
1641
1642 clear_bit(0, (void*)&sp->in_interrupt);
1643 return IRQ_RETVAL(handled);
1644}
1645
1646static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1647{
1648 struct speedo_private *sp = netdev_priv(dev);
1649 struct RxFD *rxf;
1650 struct sk_buff *skb;
1651 /* Get a fresh skbuff to replace the consumed one. */
1652 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1653	if (skb)
1654 rx_align(skb); /* Align IP on 16 byte boundary */
1655	sp->rx_skbuff[entry] = skb;
1656 if (skb == NULL) {
1657 sp->rx_ringp[entry] = NULL;
1658 return NULL;
1659 }
1660	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1661	sp->rx_ring_dma[entry] =
1662 pci_map_single(sp->pdev, rxf,
1663 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1664 skb->dev = dev;
1665 skb_reserve(skb, sizeof(struct RxFD));
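	/* The packet data area follows the RxFD header in the same skb
	   (simplified Rx model), so the separate buffer-address field is
	   unused and set to all ones. */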
1666 rxf->rx_buf_addr = 0xffffffff;
1667 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1668 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1669 return rxf;
1670}
1671
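/* Append a freshly allocated RxFD to the hardware's receive list: the new
   descriptor is written with the EL and suspend bits set (0xC0000001), then
   those bits are cleared on the previous tail so the RU can run into it. */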
1672static inline void speedo_rx_link(struct net_device *dev, int entry,
1673 struct RxFD *rxf, dma_addr_t rxf_dma)
1674{
1675 struct speedo_private *sp = netdev_priv(dev);
1676 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1677 rxf->link = 0; /* None yet. */
1678 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1679 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1680 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1681 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1682 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1683 sp->last_rxf = rxf;
1684 sp->last_rxf_dma = rxf_dma;
1685}
1686
1687static int speedo_refill_rx_buf(struct net_device *dev, int force)
1688{
1689 struct speedo_private *sp = netdev_priv(dev);
1690 int entry;
1691 struct RxFD *rxf;
1692
1693 entry = sp->dirty_rx % RX_RING_SIZE;
1694 if (sp->rx_skbuff[entry] == NULL) {
1695 rxf = speedo_rx_alloc(dev, entry);
1696 if (rxf == NULL) {
1697 unsigned int forw;
1698 int forw_entry;
1699 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1700 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1701 dev->name, force);
1702 sp->rx_ring_state |= RrOOMReported;
1703 }
1704 speedo_show_state(dev);
1705 if (!force)
1706 return -1; /* Better luck next time! */
1707 /* Borrow an skb from one of next entries. */
1708 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1709 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1710 break;
1711 if (forw == sp->cur_rx)
1712 return -1;
1713 forw_entry = forw % RX_RING_SIZE;
1714 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1715 sp->rx_skbuff[forw_entry] = NULL;
1716 rxf = sp->rx_ringp[forw_entry];
1717 sp->rx_ringp[forw_entry] = NULL;
1718 sp->rx_ringp[entry] = rxf;
1719 }
1720 } else {
1721 rxf = sp->rx_ringp[entry];
1722 }
1723 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1724 sp->dirty_rx++;
1725 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1726 return 0;
1727}
1728
1729static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1730{
1731 struct speedo_private *sp = netdev_priv(dev);
1732
1733 /* Refill the RX ring. */
1734 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1735 speedo_refill_rx_buf(dev, force) != -1);
1736}
1737
1738static int
1739speedo_rx(struct net_device *dev)
1740{
1741 struct speedo_private *sp = netdev_priv(dev);
1742 int entry = sp->cur_rx % RX_RING_SIZE;
1743 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1744 int alloc_ok = 1;
1745 int npkts = 0;
1746
1747 if (netif_msg_intr(sp))
1748 printk(KERN_DEBUG " In speedo_rx().\n");
1749 /* If we own the next entry, it's a new packet. Send it up. */
1750 while (sp->rx_ringp[entry] != NULL) {
1751 int status;
1752 int pkt_len;
1753
1754 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1755 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1756 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1757 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1758
1759 if (!(status & RxComplete))
1760 break;
1761
1762 if (--rx_work_limit < 0)
1763 break;
1764
1765 /* Check for a rare out-of-memory case: the current buffer is
1766 the last buffer allocated in the RX ring. --SAW */
1767 if (sp->last_rxf == sp->rx_ringp[entry]) {
1768 /* Postpone the packet. It'll be reaped at an interrupt when this
1769 packet is no longer the last packet in the ring. */
1770 if (netif_msg_rx_err(sp))
1771 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1772 dev->name);
1773 sp->rx_ring_state |= RrPostponed;
1774 break;
1775 }
1776
1777 if (netif_msg_rx_status(sp))
1778 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1779 pkt_len);
1780 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1781 if (status & RxErrTooBig)
1782 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1783 "status %8.8x!\n", dev->name, status);
1784 else if (! (status & RxOK)) {
1785 /* There was a fatal error. This *should* be impossible. */
1786 sp->stats.rx_errors++;
1787 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1788 "status %8.8x.\n",
1789 dev->name, status);
1790 }
1791 } else {
1792 struct sk_buff *skb;
1793
1794 /* Check if the packet is long enough to just accept without
1795 copying to a properly sized skbuff. */
1796 if (pkt_len < rx_copybreak
1797 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1798 skb->dev = dev;
1799 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1800 /* 'skb_put()' points to the start of sk_buff data area. */
1801 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1802 sizeof(struct RxFD) + pkt_len,
1803 PCI_DMA_FROMDEVICE);
1804
1805#if 1 || USE_IP_CSUM
1806 /* Packet is in one chunk -- we can copy + cksum. */
1807				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
1808				skb_put(skb, pkt_len);
1809#else
1810				memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
1811					pkt_len);
1812#endif
1813 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1814 sizeof(struct RxFD) + pkt_len,
1815 PCI_DMA_FROMDEVICE);
1816 npkts++;
1817 } else {
1818 /* Pass up the already-filled skbuff. */
1819 skb = sp->rx_skbuff[entry];
1820 if (skb == NULL) {
1821 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1822 dev->name);
1823 break;
1824 }
1825 sp->rx_skbuff[entry] = NULL;
1826 skb_put(skb, pkt_len);
1827 npkts++;
1828 sp->rx_ringp[entry] = NULL;
1829 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1830 PKT_BUF_SZ + sizeof(struct RxFD),
1831 PCI_DMA_FROMDEVICE);
1832 }
1833 skb->protocol = eth_type_trans(skb, dev);
1834 netif_rx(skb);
1835 dev->last_rx = jiffies;
1836 sp->stats.rx_packets++;
1837 sp->stats.rx_bytes += pkt_len;
1838 }
1839 entry = (++sp->cur_rx) % RX_RING_SIZE;
1840 sp->rx_ring_state &= ~RrPostponed;
1841 /* Refill the recently taken buffers.
1842 Do it one-by-one to handle traffic bursts better. */
1843 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1844 alloc_ok = 0;
1845 }
1846
1847 /* Try hard to refill the recently taken buffers. */
1848 speedo_refill_rx_buffers(dev, 1);
1849
1850 if (npkts)
1851 sp->last_rx_time = jiffies;
1852
1853 return 0;
1854}
1855
1856static int
1857speedo_close(struct net_device *dev)
1858{
1859 struct speedo_private *sp = netdev_priv(dev);
1860 void __iomem *ioaddr = sp->regs;
1861 int i;
1862
1863 netdevice_stop(dev);
1864 netif_stop_queue(dev);
1865
1866 if (netif_msg_ifdown(sp))
1867 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1868 dev->name, ioread16(ioaddr + SCBStatus));
1869
1870 /* Shut off the media monitoring timer. */
1871 del_timer_sync(&sp->timer);
1872
1873 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1874
1875 /* Shutting down the chip nicely fails to disable flow control. So.. */
1876 iowrite32(PortPartialReset, ioaddr + SCBPort);
1877 ioread32(ioaddr + SCBPort); /* flush posted write */
1878 /*
1879 * The chip requires a 10 microsecond quiet period. Wait here!
1880 */
1881 udelay(10);
1882
1883 free_irq(dev->irq, dev);
1884 speedo_show_state(dev);
1885
1886 /* Free all the skbuffs in the Rx and Tx queues. */
1887 for (i = 0; i < RX_RING_SIZE; i++) {
1888 struct sk_buff *skb = sp->rx_skbuff[i];
1889 sp->rx_skbuff[i] = NULL;
1890 /* Clear the Rx descriptors. */
1891 if (skb) {
1892 pci_unmap_single(sp->pdev,
1893 sp->rx_ring_dma[i],
1894 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1895 dev_kfree_skb(skb);
1896 }
1897 }
1898
1899 for (i = 0; i < TX_RING_SIZE; i++) {
1900 struct sk_buff *skb = sp->tx_skbuff[i];
1901 sp->tx_skbuff[i] = NULL;
1902 /* Clear the Tx descriptors. */
1903 if (skb) {
1904 pci_unmap_single(sp->pdev,
1905 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1906 skb->len, PCI_DMA_TODEVICE);
1907 dev_kfree_skb(skb);
1908 }
1909 }
1910
1911 /* Free multicast setting blocks. */
1912 for (i = 0; sp->mc_setup_head != NULL; i++) {
1913 struct speedo_mc_block *t;
1914 t = sp->mc_setup_head->next;
1915 kfree(sp->mc_setup_head);
1916 sp->mc_setup_head = t;
1917 }
1918 sp->mc_setup_tail = NULL;
1919 if (netif_msg_ifdown(sp))
1920 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1921
1922 pci_set_power_state(sp->pdev, PCI_D2);
1923
1924 return 0;
1925}
1926
1927/* The Speedo-3 has an especially awkward and unusable method of getting
1928 statistics out of the chip. It takes an unpredictable length of time
1929 for the dump-stats command to complete. To avoid a busy-wait loop we
1930 update the stats with the previous dump results, and then trigger a
1931 new dump.
1932
1933 Oh, and incoming frames are dropped while executing dump-stats!
1934 */
1935static struct net_device_stats *
1936speedo_get_stats(struct net_device *dev)
1937{
1938 struct speedo_private *sp = netdev_priv(dev);
1939 void __iomem *ioaddr = sp->regs;
1940
1941 /* Update only if the previous dump finished. */
1942 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1943 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1944 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1945 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1946 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1947 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1948 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1949 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1950 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1951 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1952 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1953 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1954 sp->lstats->done_marker = 0x0000;
1955 if (netif_running(dev)) {
1956 unsigned long flags;
1957 /* Take a spinlock to make wait_for_cmd_done and sending the
1958 command atomic. --SAW */
1959 spin_lock_irqsave(&sp->lock, flags);
1960 wait_for_cmd_done(dev, sp);
1961 iowrite8(CUDumpStats, ioaddr + SCBCmd);
1962 spin_unlock_irqrestore(&sp->lock, flags);
1963 }
1964 }
1965 return &sp->stats;
1966}
1967
1968static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1969{
1970 struct speedo_private *sp = netdev_priv(dev);
1971 strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1972 strncpy(info->version, version, sizeof(info->version)-1);
1973 if (sp->pdev)
1974 strcpy(info->bus_info, pci_name(sp->pdev));
1975}
1976
1977static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1978{
1979 struct speedo_private *sp = netdev_priv(dev);
1980 spin_lock_irq(&sp->lock);
1981 mii_ethtool_gset(&sp->mii_if, ecmd);
1982 spin_unlock_irq(&sp->lock);
1983 return 0;
1984}
1985
1986static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1987{
1988 struct speedo_private *sp = netdev_priv(dev);
1989 int res;
1990 spin_lock_irq(&sp->lock);
1991 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1992 spin_unlock_irq(&sp->lock);
1993 return res;
1994}
1995
1996static int speedo_nway_reset(struct net_device *dev)
1997{
1998 struct speedo_private *sp = netdev_priv(dev);
1999 return mii_nway_restart(&sp->mii_if);
2000}
2001
2002static u32 speedo_get_link(struct net_device *dev)
2003{
2004 struct speedo_private *sp = netdev_priv(dev);
2005 return mii_link_ok(&sp->mii_if);
2006}
2007
2008static u32 speedo_get_msglevel(struct net_device *dev)
2009{
2010 struct speedo_private *sp = netdev_priv(dev);
2011 return sp->msg_enable;
2012}
2013
2014static void speedo_set_msglevel(struct net_device *dev, u32 v)
2015{
2016 struct speedo_private *sp = netdev_priv(dev);
2017 sp->msg_enable = v;
2018}
2019
2020static struct ethtool_ops ethtool_ops = {
2021 .get_drvinfo = speedo_get_drvinfo,
2022 .get_settings = speedo_get_settings,
2023 .set_settings = speedo_set_settings,
2024 .nway_reset = speedo_nway_reset,
2025 .get_link = speedo_get_link,
2026 .get_msglevel = speedo_get_msglevel,
2027 .set_msglevel = speedo_set_msglevel,
2028};
2029
2030static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2031{
2032 struct speedo_private *sp = netdev_priv(dev);
2033 struct mii_ioctl_data *data = if_mii(rq);
2034 int phy = sp->phy[0] & 0x1f;
2035 int saved_acpi;
2036 int t;
2037
2038 switch(cmd) {
2039 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2040 data->phy_id = phy;
2041
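	/* Fall through so the register read below also fills in val_out. */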
2042 case SIOCGMIIREG: /* Read MII PHY register. */
2043 /* FIXME: these operations need to be serialized with MDIO
2044 access from the timeout handler.
2045 They are currently serialized only with MDIO access from the
2046 timer routine. 2000/05/09 SAW */
2047 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2048 t = del_timer_sync(&sp->timer);
2049 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2050 if (t)
2051 add_timer(&sp->timer); /* may be set to the past --SAW */
2052 pci_set_power_state(sp->pdev, saved_acpi);
2053 return 0;
2054
2055 case SIOCSMIIREG: /* Write MII PHY register. */
2056 if (!capable(CAP_NET_ADMIN))
2057 return -EPERM;
2058 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2059 t = del_timer_sync(&sp->timer);
2060 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2061 if (t)
2062 add_timer(&sp->timer); /* may be set to the past --SAW */
2063 pci_set_power_state(sp->pdev, saved_acpi);
2064 return 0;
2065 default:
2066 return -EOPNOTSUPP;
2067 }
2068}
2069
2070/* Set or clear the multicast filter for this adaptor.
2071 This is very ugly with Intel chips -- we usually have to execute an
2072 entire configuration command, plus process a multicast command.
2073 This is complicated. We must put a large configuration command and
2074 an arbitrarily-sized multicast command in the transmit list.
2075 To minimize the disruption -- the previous command might have already
2076 loaded the link -- we convert the current command block, normally a Tx
2077 command, into a no-op and link it to the new command.
2078*/
2079static void set_rx_mode(struct net_device *dev)
2080{
2081 struct speedo_private *sp = netdev_priv(dev);
2082 void __iomem *ioaddr = sp->regs;
2083 struct descriptor *last_cmd;
2084 char new_rx_mode;
2085 unsigned long flags;
2086 int entry, i;
2087
2088 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2089 new_rx_mode = 3;
2090 } else if ((dev->flags & IFF_ALLMULTI) ||
2091 dev->mc_count > multicast_filter_limit) {
2092 new_rx_mode = 1;
2093 } else
2094 new_rx_mode = 0;
2095
2096 if (netif_msg_rx_status(sp))
2097 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2098 sp->rx_mode, new_rx_mode);
2099
2100 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2101 /* The Tx ring is full -- don't add anything! Hope the mode will be
2102 * set again later. */
2103 sp->rx_mode = -1;
2104 return;
2105 }
2106
2107 if (new_rx_mode != sp->rx_mode) {
2108 u8 *config_cmd_data;
2109
2110 spin_lock_irqsave(&sp->lock, flags);
2111 entry = sp->cur_tx++ % TX_RING_SIZE;
2112 last_cmd = sp->last_cmd;
2113 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2114
2115 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2116 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2117 sp->tx_ring[entry].link =
2118 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2119 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2120 /* Construct a full CmdConfig frame. */
2121 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
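		/* Patch the template bytes that depend on module parameters and on
		   the requested Rx mode; everything else comes from i82558_config_cmd. */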
2122 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2123 config_cmd_data[4] = rxdmacount;
2124 config_cmd_data[5] = txdmacount + 0x80;
2125 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2126		/* 0x80 doesn't disable FC; 0x84 does.
2127		   Disable flow control since we are not ACKing any FC interrupts
2128		   for now. --Dragan */
2129 config_cmd_data[19] = 0x84;
2130 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2131 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2132 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2133 config_cmd_data[15] |= 0x80;
2134 config_cmd_data[8] = 0;
2135 }
2136 /* Trigger the command unit resume. */
2137 wait_for_cmd_done(dev, sp);
2138 clear_suspend(last_cmd);
2139 iowrite8(CUResume, ioaddr + SCBCmd);
2140 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2141 netif_stop_queue(dev);
2142 sp->tx_full = 1;
2143 }
2144 spin_unlock_irqrestore(&sp->lock, flags);
2145 }
2146
2147 if (new_rx_mode == 0 && dev->mc_count < 4) {
2148 /* The simple case of 0-3 multicast list entries occurs often, and
2149 fits within one tx_ring[] entry. */
2150 struct dev_mc_list *mclist;
2151 u16 *setup_params, *eaddrs;
2152
2153 spin_lock_irqsave(&sp->lock, flags);
2154 entry = sp->cur_tx++ % TX_RING_SIZE;
2155 last_cmd = sp->last_cmd;
2156 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2157
2158 sp->tx_skbuff[entry] = NULL;
2159 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2160 sp->tx_ring[entry].link =
2161 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2162 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2163 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2164 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2165 /* Fill in the multicast addresses. */
2166 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2167 i++, mclist = mclist->next) {
2168 eaddrs = (u16 *)mclist->dmi_addr;
2169 *setup_params++ = *eaddrs++;
2170 *setup_params++ = *eaddrs++;
2171 *setup_params++ = *eaddrs++;
2172 }
2173
2174 wait_for_cmd_done(dev, sp);
2175 clear_suspend(last_cmd);
2176 /* Immediately trigger the command unit resume. */
2177 iowrite8(CUResume, ioaddr + SCBCmd);
2178
2179 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2180 netif_stop_queue(dev);
2181 sp->tx_full = 1;
2182 }
2183 spin_unlock_irqrestore(&sp->lock, flags);
2184 } else if (new_rx_mode == 0) {
2185 struct dev_mc_list *mclist;
2186 u16 *setup_params, *eaddrs;
2187 struct speedo_mc_block *mc_blk;
2188 struct descriptor *mc_setup_frm;
2189 int i;
2190
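		/* More than three addresses: build the CmdMulticastList frame in a
		   separately allocated block and splice it into the command chain via
		   a NoOp ring entry (see below). */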
2191 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2192 GFP_ATOMIC);
2193 if (mc_blk == NULL) {
2194 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2195 dev->name);
2196 sp->rx_mode = -1; /* We failed, try again. */
2197 return;
2198 }
2199 mc_blk->next = NULL;
2200 mc_blk->len = 2 + multicast_filter_limit*6;
2201 mc_blk->frame_dma =
2202 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2203 PCI_DMA_TODEVICE);
2204 mc_setup_frm = &mc_blk->frame;
2205
2206 /* Fill the setup frame. */
2207 if (netif_msg_ifup(sp))
2208 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2209 dev->name, mc_setup_frm);
2210 mc_setup_frm->cmd_status =
2211 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2212 /* Link set below. */
2213 setup_params = (u16 *)&mc_setup_frm->params;
2214 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2215 /* Fill in the multicast addresses. */
2216 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2217 i++, mclist = mclist->next) {
2218 eaddrs = (u16 *)mclist->dmi_addr;
2219 *setup_params++ = *eaddrs++;
2220 *setup_params++ = *eaddrs++;
2221 *setup_params++ = *eaddrs++;
2222 }
2223
2224 /* Disable interrupts while playing with the Tx Cmd list. */
2225 spin_lock_irqsave(&sp->lock, flags);
2226
2227 if (sp->mc_setup_tail)
2228 sp->mc_setup_tail->next = mc_blk;
2229 else
2230 sp->mc_setup_head = mc_blk;
2231 sp->mc_setup_tail = mc_blk;
2232 mc_blk->tx = sp->cur_tx;
2233
2234 entry = sp->cur_tx++ % TX_RING_SIZE;
2235 last_cmd = sp->last_cmd;
2236 sp->last_cmd = mc_setup_frm;
2237
2238 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2239 sp->tx_skbuff[entry] = NULL;
2240 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2241 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2242
2243 /* Set the link in the setup frame. */
2244 mc_setup_frm->link =
2245 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2246
2247 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2248 mc_blk->len, PCI_DMA_TODEVICE);
2249
2250 wait_for_cmd_done(dev, sp);
2251 clear_suspend(last_cmd);
2252 /* Immediately trigger the command unit resume. */
2253 iowrite8(CUResume, ioaddr + SCBCmd);
2254
2255 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2256 netif_stop_queue(dev);
2257 sp->tx_full = 1;
2258 }
2259 spin_unlock_irqrestore(&sp->lock, flags);
2260
2261 if (netif_msg_rx_status(sp))
2262			printk(" CmdMCSetup frame with %d addresses in entry %d.\n",
2263				   dev->mc_count, entry);
2264 }
2265
2266 sp->rx_mode = new_rx_mode;
2267}
2268
2269#ifdef CONFIG_PM
2270static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2271{
2272 struct net_device *dev = pci_get_drvdata (pdev);
2273 struct speedo_private *sp = netdev_priv(dev);
2274 void __iomem *ioaddr = sp->regs;
2275
2276 pci_save_state(pdev);
2277
2278 if (!netif_running(dev))
2279 return 0;
2280
2281 del_timer_sync(&sp->timer);
2282
2283 netif_device_detach(dev);
2284 iowrite32(PortPartialReset, ioaddr + SCBPort);
2285
2286 /* XXX call pci_set_power_state ()? */
2287 pci_disable_device(pdev);
2288 pci_set_power_state (pdev, PCI_D3hot);
2289 return 0;
2290}
2291
2292static int eepro100_resume(struct pci_dev *pdev)
2293{
2294 struct net_device *dev = pci_get_drvdata (pdev);
2295 struct speedo_private *sp = netdev_priv(dev);
2296 void __iomem *ioaddr = sp->regs;
2297
2298 pci_set_power_state(pdev, PCI_D0);
2299 pci_restore_state(pdev);
2300 pci_enable_device(pdev);
2301 pci_set_master(pdev);
2302
2303 if (!netif_running(dev))
2304 return 0;
2305
2306	/* I'm not at all certain that this part of the code works.
2307 The problems are:
2308 - correct hardware reinitialization;
2309 - correct driver behavior between different steps of the
2310 reinitialization;
2311 - serialization with other driver calls.
2312 2000/03/08 SAW */
2313 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2314 speedo_resume(dev);
2315 netif_device_attach(dev);
2316 sp->rx_mode = -1;
2317 sp->flow_ctrl = sp->partner = 0;
2318 set_rx_mode(dev);
2319 sp->timer.expires = RUN_AT(2*HZ);
2320 add_timer(&sp->timer);
2321 return 0;
2322}
2323#endif /* CONFIG_PM */
2324
2325static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2326{
2327 struct net_device *dev = pci_get_drvdata (pdev);
2328 struct speedo_private *sp = netdev_priv(dev);
2329
2330 unregister_netdev(dev);
2331
2332 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2333 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2334
2335 pci_iounmap(pdev, sp->regs);
2336 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2337 + sizeof(struct speedo_stats),
2338 sp->tx_ring, sp->tx_ring_dma);
2339 pci_disable_device(pdev);
2340 free_netdev(dev);
2341}
2342
2343static struct pci_device_id eepro100_pci_tbl[] = {
2344 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2345 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2346 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2347 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2348 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2362 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2363 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2364 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2365 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2366 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2367 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2368 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2369 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2370 { 0,}
2371};
2372MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2373
2374static struct pci_driver eepro100_driver = {
2375 .name = "eepro100",
2376 .id_table = eepro100_pci_tbl,
2377 .probe = eepro100_init_one,
2378 .remove = __devexit_p(eepro100_remove_one),
2379#ifdef CONFIG_PM
2380 .suspend = eepro100_suspend,
2381 .resume = eepro100_resume,
2382#endif /* CONFIG_PM */
2383};
2384
2385static int __init eepro100_init_module(void)
2386{
2387#ifdef MODULE
2388 printk(version);
2389#endif
2390 return pci_module_init(&eepro100_driver);
2391}
2392
2393static void __exit eepro100_cleanup_module(void)
2394{
2395 pci_unregister_driver(&eepro100_driver);
2396}
2397
2398module_init(eepro100_init_module);
2399module_exit(eepro100_cleanup_module);
2400
2401/*
2402 * Local variables:
2403 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2404 * c-indent-level: 4
2405 * c-basic-offset: 4
2406 * tab-width: 4
2407 * End:
2408 */