/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
/*
    Written/copyright 1994-1999 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU General Public License, incorporated herein by reference.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    -----------------------------------------------------------

    Linux kernel-specific changes:

    LK1.0 (Ion Badulescu)
    - Major cleanup
    - Use 2.4 PCI API
    - Support ethtool
    - Rewrite perfect filter/hash code
    - Use interrupts for media changes

    LK1.1 (Ion Badulescu)
    - Disallow negotiation of unsupported full-duplex modes
*/

#define DRV_NAME    "xircom_tulip_cb"
#define DRV_VERSION "0.91+LK1.1"
#define DRV_RELDATE "October 11, 2001"

#define CARDBUS 1

/* A few user-configurable values. */

#define xircom_debug debug
#ifdef XIRCOM_DEBUG
static int xircom_debug = XIRCOM_DEBUG;
#else
static int xircom_debug = 1;
#endif

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 25;

#define MAX_UNITS 4
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];          /* Jumbo MTU for interfaces. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define RX_RING_SIZE    32

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#ifdef __alpha__
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
    Typical: Set 16 longword cache alignment, no burst limit.

    Cache alignment, bits 15:14     Burst length, bits 13:8
    0000  No alignment              0x0000  unlimited
    4000  8 longwords               0x0100  1 longword
    8000  16 longwords              0x0200  2 longwords
    C000  32 longwords              0x0400  4 longwords
                                    0x0800  8 longwords
                                    0x1000  16 longwords
                                    0x2000  32 longwords

    Warning: many older 486 systems are broken and require setting 0x00A04800
      (8 longword cache alignment, 8 longword burst).
    ToDo: Non-Intel setting could be better.
*/

#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__powerpc__)
static int csr0 = 0x01B00000 | 0x8000;
#elif defined(__sparc__)
static int csr0 = 0x01B00080 | 0x8000;
#elif defined(__i386__)
static int csr0 = 0x01A00000 | 0x8000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif
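
/* A worked decode of the i386 default against the table above: 0x01A00000
   sets the EnableMWI | EnableMRL | EnableMRM PCI command bits (see
   csr0_control_bits below), and 0x8000 in bits 15:14 selects 16-longword
   cache alignment with an unlimited burst.  The Alpha/IA64/x86-64 value
   uses 0xE000 instead: 32-longword alignment and a 32-longword burst. */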

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (4 * HZ)
#define PKT_BUF_SZ      1536        /* Size of each temporary Rx buffer. */
#define PKT_SETUP_SZ    192         /* Size of the setup frame */

/* PCI registers */
#define PCI_POWERMGMT   0x40

#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/io.h>
#include <asm/processor.h>          /* Processor type for cache alignment. */
#include <asm/uaccess.h>


/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);

module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

#define RUN_AT(x) (jiffies + (x))

/*
                Theory of Operation

I. Board Compatibility

This device driver was forked from the driver for the DECchip "Tulip",
Digital's single-chip ethernet controllers for PCI.  It supports Xircom's
almost-Tulip-compatible CBE-100 CardBus adapters.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.

III. Driver operation

IIIa. Ring buffers

The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
This driver uses statically allocated rings of Rx and Tx descriptors, set at
compile time by RX/TX_RING_SIZE.  This version of the driver allocates skbuffs
for the Rx ring buffers at open() time and passes the skb->data field to the
Xircom as receive data buffers.  When an incoming frame is less than
RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
copied to the new skbuff.  When the incoming frame is larger, the skbuff is
passed directly up the protocol stack and replaced by a newly allocated
skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.  A subtle aspect of this
choice is that the Xircom only receives into longword aligned buffers, thus
the IP header at offset 14 isn't longword aligned for further processing.
Copied frames are put into the new skbuff at an offset of "+2", thus copying
has the beneficial effect of aligning the IP header and preloading the
cache.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'tp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'tp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

IV. Notes

IVb. References

http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/

/* A full-duplex map for media types. */
enum MediaIs {
    MediaIsFD = 1, MediaAlwaysFD = 2, MediaIsMII = 4, MediaIsFx = 8,
    MediaIs100 = 16
};
static const char media_cap[] =
{0,0,0,16,  3,19,16,24,  27,4,7,5,  0,20,23,20 };
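/* Reading the map with the enum above: media_cap[5] == 19 ==
   MediaIsFD | MediaAlwaysFD | MediaIs100 (an always-full-duplex 100 Mb/s
   medium), media_cap[9] == 4 == MediaIsMII, media_cap[3] == 16 ==
   MediaIs100.  The index is the Tulip-style media type code kept in
   dev->if_port and tp->default_port. */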

/* Offsets to the Command and Status Registers, "CSRs".  All accesses
   must be longword instructions and quadword aligned. */
enum xircom_offsets {
    CSR0 = 0,     CSR1 = 0x08,  CSR2 = 0x10,  CSR3 = 0x18,  CSR4 = 0x20,  CSR5 = 0x28,
    CSR6 = 0x30,  CSR7 = 0x38,  CSR8 = 0x40,  CSR9 = 0x48,  CSR10 = 0x50, CSR11 = 0x58,
    CSR12 = 0x60, CSR13 = 0x68, CSR14 = 0x70, CSR15 = 0x78, CSR16 = 0x04,
};

/* The bits in the CSR5 status register, mostly interrupt sources. */
enum status_bits {
    LinkChange = 0x08000000,
    NormalIntr = 0x10000,   NormalIntrMask = 0x00014045,
    AbnormalIntr = 0x8000,  AbnormalIntrMask = 0x0a00a5a2,
    ReservedIntrMask = 0xe0001a18,
    EarlyRxIntr = 0x4000,   BusErrorIntr = 0x2000,
    EarlyTxIntr = 0x400,    RxDied = 0x100, RxNoBuf = 0x80, RxIntr = 0x40,
    TxFIFOUnderflow = 0x20, TxNoBuf = 0x04, TxDied = 0x02,  TxIntr = 0x01,
};
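
/* The composite masks decode to the individual bits above:
   NormalIntrMask   0x00014045 == NormalIntr | EarlyRxIntr | RxIntr |
                                  TxNoBuf | TxIntr;
   AbnormalIntrMask 0x0a00a5a2 == AbnormalIntr | LinkChange | BusErrorIntr |
                                  EarlyTxIntr | RxDied | RxNoBuf |
                                  TxFIFOUnderflow | TxDied, plus the
                                  unnamed (reserved) bit 0x02000000. */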

enum csr0_control_bits {
    EnableMWI = 0x01000000, EnableMRL = 0x00800000,
    EnableMRM = 0x00200000, EqualBusPrio = 0x02,
    SoftwareReset = 0x01,
};

enum csr6_control_bits {
    ReceiveAllBit = 0x40000000, AllMultiBit = 0x80, PromiscBit = 0x40,
    HashFilterBit = 0x01, FullDuplexBit = 0x0200,
    TxThresh10 = 0x400000, TxStoreForw = 0x200000,
    TxThreshMask = 0xc000, TxThreshShift = 14,
    EnableTx = 0x2000, EnableRx = 0x02,
    ReservedZeroMask = 0x8d930134, ReservedOneMask = 0x320c0000,
    EnableTxRx = (EnableTx | EnableRx),
};


enum tbl_flag {
    HAS_MII = 1, HAS_ACPI = 2,
};
static struct xircom_chip_table {
    char *chip_name;
    int valid_intrs;            /* CSR7 interrupt enable settings */
    int flags;
} xircom_tbl[] = {
    { "Xircom Cardbus Adapter",
      LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
      RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
      HAS_MII | HAS_ACPI, },
    { NULL, },
};
/* This matches the table above. */
enum chips {
    X3201_3,
};


/* The Xircom Rx and Tx buffer descriptors. */
struct xircom_rx_desc {
    s32 status;
    s32 length;
    u32 buffer1, buffer2;
};

struct xircom_tx_desc {
    s32 status;
    s32 length;
    u32 buffer1, buffer2;       /* We use only buffer 1. */
};

enum tx_desc0_status_bits {
    Tx0DescOwned = 0x80000000, Tx0DescError = 0x8000, Tx0NoCarrier = 0x0800,
    Tx0LateColl = 0x0200, Tx0ManyColl = 0x0100, Tx0Underflow = 0x02,
};
enum tx_desc1_status_bits {
    Tx1ComplIntr = 0x80000000, Tx1LastSeg = 0x40000000, Tx1FirstSeg = 0x20000000,
    Tx1SetupPkt = 0x08000000, Tx1DisableCRC = 0x04000000, Tx1RingWrap = 0x02000000,
    Tx1ChainDesc = 0x01000000, Tx1NoPad = 0x800000, Tx1HashSetup = 0x400000,
    Tx1WholePkt = (Tx1FirstSeg | Tx1LastSeg),
};
enum rx_desc0_status_bits {
    Rx0DescOwned = 0x80000000, Rx0DescError = 0x8000, Rx0NoSpace = 0x4000,
    Rx0Runt = 0x0800, Rx0McastPkt = 0x0400, Rx0FirstSeg = 0x0200, Rx0LastSeg = 0x0100,
    Rx0HugeFrame = 0x80, Rx0CRCError = 0x02,
    Rx0WholePkt = (Rx0FirstSeg | Rx0LastSeg),
};
enum rx_desc1_status_bits {
    Rx1RingWrap = 0x02000000, Rx1ChainDesc = 0x01000000,
};

struct xircom_private {
    struct xircom_rx_desc rx_ring[RX_RING_SIZE];
    struct xircom_tx_desc tx_ring[TX_RING_SIZE];
    /* The saved address of a sent-in-place packet/buffer, for skfree(). */
    struct sk_buff *tx_skbuff[TX_RING_SIZE];
#ifdef CARDBUS
    /* The X3201-3 requires 4-byte aligned tx bufs */
    struct sk_buff *tx_aligned_skbuff[TX_RING_SIZE];
#endif
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff *rx_skbuff[RX_RING_SIZE];
    u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)];    /* Pseudo-Tx frame to init address table. */
    int chip_id;
    struct net_device_stats stats;
    unsigned int cur_rx, cur_tx;        /* The next free ring entry */
    unsigned int dirty_rx, dirty_tx;    /* The ring entries to be free()ed. */
    unsigned int tx_full:1;             /* The Tx queue is full. */
    unsigned int speed100:1;
    unsigned int full_duplex:1;         /* Full-duplex operation requested. */
    unsigned int autoneg:1;
    unsigned int default_port:4;        /* Last dev->if_port value. */
    unsigned int open:1;
    unsigned int csr0;                  /* CSR0 setting. */
    unsigned int csr6;                  /* Current CSR6 control settings. */
    u16 to_advertise;                   /* NWay capabilities advertised. */
    u16 advertising[4];
    signed char phys[4], mii_cnt;       /* MII device addresses. */
    int saved_if_port;
    struct pci_dev *pdev;
    spinlock_t lock;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static void xircom_up(struct net_device *dev);
static void xircom_down(struct net_device *dev);
static int xircom_open(struct net_device *dev);
static void xircom_tx_timeout(struct net_device *dev);
static void xircom_init_ring(struct net_device *dev);
static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int xircom_rx(struct net_device *dev);
static void xircom_media_change(struct net_device *dev);
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int xircom_close(struct net_device *dev);
static struct net_device_stats *xircom_get_stats(struct net_device *dev);
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static struct ethtool_ops ops;


/* The Xircom cards are picky about when certain bits in CSR6 can be
   manipulated.  Keith Owens <kaos@ocs.com.au>. */
static void outl_CSR6(u32 newcsr6, long ioaddr)
{
    const int strict_bits =
        TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
    int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
    unsigned long flags;
    save_flags(flags);
    cli();
    /* mask out the reserved bits that always read 0 on the Xircom cards */
    newcsr6 &= ~ReservedZeroMask;
    /* or in the reserved bits that always read 1 */
    newcsr6 |= ReservedOneMask;
    currcsr6 = inl(ioaddr + CSR6);
    if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
        ((currcsr6 & ~EnableTxRx) == 0)) {
        outl(newcsr6, ioaddr + CSR6);   /* safe */
        restore_flags(flags);
        return;
    }
    /* make sure the transmitter and receiver are stopped first */
    currcsr6 &= ~EnableTxRx;
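    /* CSR5 bits 22:20 hold the Tx process state and bits 19:17 the Rx
       process state (see the 21x4x datasheets referenced above); the loop
       below waits until both engines report stopped (0) or one of the
       suspended states matched here. */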
    while (1) {
        csr5 = inl(ioaddr + CSR5);
        if (csr5 == 0xffffffff)
            break;      /* cannot read csr5, card removed? */
        csr5_22_20 = csr5 & 0x700000;
        csr5_19_17 = csr5 & 0x0e0000;
        if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
            (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
            break;      /* both are stopped or suspended */
        if (!--attempts) {
            printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts, "
                   "csr5=0x%08x\n", csr5);
            outl(newcsr6, ioaddr + CSR6);   /* unsafe but do it anyway */
            restore_flags(flags);
            return;
        }
        outl(currcsr6, ioaddr + CSR6);
        udelay(1);
    }
    /* now it is safe to change csr6 */
    outl(newcsr6, ioaddr + CSR6);
    restore_flags(flags);
}


static void __devinit read_mac_address(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    int i, j;
    unsigned char tuple, link, data_id, data_count;

    /* Xircom has its address stored in the CIS;
     * we access it through the boot rom interface for now
     * this might not work, as the CIS is not parsed but I
     * (danilo) use the offset I found on my card's CIS!!!
     *
     * Doug Ledford: I changed this routine around so that it
     * walks the CIS memory space, parsing the config items, and
     * finds the proper lan_node_id tuple and uses the data
     * stored there.
     */
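    /* In CIS terms, tuple 0x22 is CISTPL_FUNCE; for a LAN function a
       FUNCE body beginning 0x04 (LAN_NODE_ID) with a data length of 6
       carries the MAC address, which is what the loop below matches. */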
    outl(1 << 12, ioaddr + CSR9);   /* enable boot rom access */
    for (i = 0x100; i < 0x1f7; i += link + 2) {
        outl(i, ioaddr + CSR10);
        tuple = inl(ioaddr + CSR9) & 0xff;
        outl(i + 1, ioaddr + CSR10);
        link = inl(ioaddr + CSR9) & 0xff;
        outl(i + 2, ioaddr + CSR10);
        data_id = inl(ioaddr + CSR9) & 0xff;
        outl(i + 3, ioaddr + CSR10);
        data_count = inl(ioaddr + CSR9) & 0xff;
        if ((tuple == 0x22) &&
            (data_id == 0x04) && (data_count == 0x06)) {
            /*
             * This is it.  We have the data we want.
             */
            for (j = 0; j < 6; j++) {
                outl(i + j + 4, ioaddr + CSR10);
                dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
            }
            break;
        } else if (link == 0) {
            break;
        }
    }
}


/*
 * locate the MII interfaces and initialize them.
 * we disable full-duplex modes here,
 * because we don't know how to handle them.
 */
static void find_mii_transceivers(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int phy, phy_idx;

    if (media_cap[tp->default_port] & MediaIsMII) {
        u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
        tp->to_advertise = media2advert[tp->default_port - 9];
    } else
        tp->to_advertise =
            /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
            /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;

    /* Find the connected MII xcvrs.
       Doing this in open() would allow detecting external xcvrs later,
       but takes much time. */
    for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
        int mii_status = mdio_read(dev, phy, MII_BMSR);
        if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
            ((mii_status & BMSR_100BASE4) == 0 &&
             (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
            int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
            int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
            int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
            tp->phys[phy_idx] = phy;
            tp->advertising[phy_idx++] = reg4;
            printk(KERN_INFO "%s: MII transceiver #%d "
                   "config %4.4x status %4.4x advertising %4.4x.\n",
                   dev->name, phy, mii_reg0, mii_status, mii_advert);
        }
    }
    tp->mii_cnt = phy_idx;
    if (phy_idx == 0) {
        printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
               dev->name);
        tp->phys[0] = 0;
    }
}


/*
 * To quote Arjan van de Ven:
 *   transceiver_voodoo() enables the external UTP plug thingy.
 *   it's called voodoo as I stole this code and cannot cross-reference
 *   it with the specification.
 * Actually it seems to go like this:
 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
 *   so any prior MII settings are lost.
 * - GPIO0 enables the TP port so the MII can talk to the network.
 * - a software reset will reset both GPIO pins.
 * I also moved the software reset here, because doing it in xircom_up()
 * required enabling the GPIO pins each time, which reset the MII each time.
 * Thus we couldn't control the MII -- which sucks because we don't know
 * how to handle full-duplex modes so we *must* disable them.
 */
static void transceiver_voodoo(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
    outl(SoftwareReset, ioaddr + CSR0);
    udelay(2);

    /* Deassert reset. */
    outl(tp->csr0, ioaddr + CSR0);

    /* Reset the xcvr interface and turn on heartbeat. */
    outl(0x0008, ioaddr + CSR15);
    udelay(5);          /* The delays are Xircom-recommended to give the
                         * chipset time to reset the actual hardware
                         * on the PCMCIA card
                         */
    outl(0xa8050000, ioaddr + CSR15);
    udelay(5);
    outl(0xa00f0000, ioaddr + CSR15);
    udelay(5);

    outl_CSR6(0, ioaddr);
    //outl_CSR6(FullDuplexBit, ioaddr);
}


static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct net_device *dev;
    struct xircom_private *tp;
    static int board_idx = -1;
    int chip_idx = id->driver_data;
    long ioaddr;
    int i;
    u8 chip_rev;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
    static int printed_version;
    if (!printed_version++)
        printk(version);
#endif

    //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));

    board_idx++;

    if (pci_enable_device(pdev))
        return -ENODEV;

    pci_set_master(pdev);

    ioaddr = pci_resource_start(pdev, 0);
    dev = alloc_etherdev(sizeof(*tp));
    if (!dev) {
        printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", board_idx);
        return -ENOMEM;
    }
    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    dev->base_addr = ioaddr;
    dev->irq = pdev->irq;

    if (pci_request_regions(pdev, dev->name)) {
        printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
        goto err_out_free_netdev;
    }

    /* Bring the chip out of sleep mode.
       Caution: Snooze mode does not work with some boards! */
    if (xircom_tbl[chip_idx].flags & HAS_ACPI)
        pci_write_config_dword(pdev, PCI_POWERMGMT, 0);

    /* Stop the chip's Tx and Rx processes. */
    outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
    /* Clear the missed-packet counter. */
    (volatile int)inl(ioaddr + CSR8);

    tp = netdev_priv(dev);

    spin_lock_init(&tp->lock);
    tp->pdev = pdev;
    tp->chip_id = chip_idx;
    /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
    /* XXX: is this necessary for Xircom? */
    tp->csr0 = csr0 & ~EnableMWI;

    pci_set_drvdata(pdev, dev);

    /* The lower four bits are the media type. */
    if (board_idx >= 0 && board_idx < MAX_UNITS) {
        tp->default_port = options[board_idx] & 15;
        if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
            tp->full_duplex = 1;
        if (mtu[board_idx] > 0)
            dev->mtu = mtu[board_idx];
    }
    if (dev->mem_start)
        tp->default_port = dev->mem_start;
    if (tp->default_port) {
        if (media_cap[tp->default_port] & MediaAlwaysFD)
            tp->full_duplex = 1;
    }
    if (tp->full_duplex)
        tp->autoneg = 0;
    else
        tp->autoneg = 1;
    tp->speed100 = 1;

    /* The Xircom-specific entries in the device structure. */
    dev->open = &xircom_open;
    dev->hard_start_xmit = &xircom_start_xmit;
    dev->stop = &xircom_close;
    dev->get_stats = &xircom_get_stats;
    dev->do_ioctl = &xircom_ioctl;
#ifdef HAVE_MULTICAST
    dev->set_multicast_list = &set_rx_mode;
#endif
    dev->tx_timeout = xircom_tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;
    SET_ETHTOOL_OPS(dev, &ops);

    transceiver_voodoo(dev);

    read_mac_address(dev);

    if (register_netdev(dev))
        goto err_out_cleardev;

    pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
    printk(KERN_INFO "%s: %s rev %d at %#3lx,",
           dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
    for (i = 0; i < 6; i++)
        printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
    printk(", IRQ %d.\n", dev->irq);

    if (xircom_tbl[chip_idx].flags & HAS_MII) {
        find_mii_transceivers(dev);
        check_duplex(dev);
    }

    return 0;

err_out_cleardev:
    pci_set_drvdata(pdev, NULL);
    pci_release_regions(pdev);
err_out_free_netdev:
    free_netdev(dev);
    return -ENODEV;
}


/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details. */

/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues or future 66 MHz PCI. */
#define mdio_delay()    inl(mdio_addr)

/* Read and write the MII registers using software-generated serial
   MDIO protocol.  It is just different enough from the EEPROM protocol
   to not share code.  The maximum data clock rate is 2.5 MHz. */
#define MDIO_SHIFT_CLK      0x10000
#define MDIO_DATA_WRITE0    0x00000
#define MDIO_DATA_WRITE1    0x20000
#define MDIO_ENB            0x00000     /* Ignore the 0x02000 databook setting. */
#define MDIO_ENB_IN         0x40000
#define MDIO_DATA_READ      0x80000
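
/* The bit-banged commands below follow the IEEE 802.3 clause 22 management
   frame format.  mdio_read() shifts out the 16-bit value (0xf6 << 10) |
   (phy_id << 5) | location: two extra sync ones, then 0110 (start + read
   opcode), the 5-bit PHY address and the 5-bit register.  mdio_write()
   shifts out the full 32-bit frame (0x5002 << 16) | (phy_id << 23) |
   (location << 18) | value: 0101 (start + write opcode), PHY, register,
   the '10' turnaround pattern, then 16 data bits. */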

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    int i;
    int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
    int retval = 0;
    long ioaddr = dev->base_addr;
    long mdio_addr = ioaddr + CSR9;

    /* Establish sync by sending at least 32 logic ones. */
    for (i = 32; i >= 0; i--) {
        outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Shift the read command bits out. */
    for (i = 15; i >= 0; i--) {
        int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;

        outl(MDIO_ENB | dataval, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Read the two turnaround bits, the 16 data bits, and a wire-idle bit. */
    for (i = 19; i > 0; i--) {
        outl(MDIO_ENB_IN, mdio_addr);
        mdio_delay();
        retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
        outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    return (retval >> 1) & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    int i;
    int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
    long ioaddr = dev->base_addr;
    long mdio_addr = ioaddr + CSR9;

    /* Establish sync by sending 32 logic ones. */
    for (i = 32; i >= 0; i--) {
        outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Shift the command bits out. */
    for (i = 31; i >= 0; i--) {
        int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
        outl(MDIO_ENB | dataval, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Clear out extra bits. */
    for (i = 2; i > 0; i--) {
        outl(MDIO_ENB_IN, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    return;
}


static void
xircom_up(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    int i;

    xircom_init_ring(dev);
    /* Clear the tx ring */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_skbuff[i] = NULL;
        tp->tx_ring[i].status = 0;
    }

    if (xircom_debug > 1)
        printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);

    outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
    outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);

    tp->saved_if_port = dev->if_port;
    if (dev->if_port == 0)
        dev->if_port = tp->default_port;

    tp->csr6 = TxThresh10 /*| FullDuplexBit*/;  /* XXX: why 10 and not 100? */

    set_rx_mode(dev);

    /* Start the chip's Tx to process setup frame. */
    outl_CSR6(tp->csr6, ioaddr);
    outl_CSR6(tp->csr6 | EnableTx, ioaddr);

    /* Acknowledge all outstanding interrupt sources */
    outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
    /* Enable interrupts by setting the interrupt mask. */
    outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
    /* Enable Rx */
    outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
    /* Rx poll demand */
    outl(0, ioaddr + CSR2);

    /* Tell the net layer we're ready */
    netif_start_queue(dev);

    /* Check current media state */
    xircom_media_change(dev);

    if (xircom_debug > 2) {
        printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
               dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
               inl(ioaddr + CSR6));
    }
}


static int
xircom_open(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);

    if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
        return -EAGAIN;

    xircom_up(dev);
    tp->open = 1;

    return 0;
}


static void xircom_tx_timeout(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    if (media_cap[dev->if_port] & MediaIsMII) {
        /* Do nothing -- the media monitor should handle this. */
        if (xircom_debug > 1)
            printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
                   dev->name);
    }

#if defined(way_too_many_messages)
    if (xircom_debug > 3) {
        int i;
        for (i = 0; i < RX_RING_SIZE; i++) {
            u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
            int j;
            printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
                   "%2.2x %2.2x %2.2x.\n",
                   i, (unsigned int)tp->rx_ring[i].status,
                   (unsigned int)tp->rx_ring[i].length,
                   (unsigned int)tp->rx_ring[i].buffer1,
                   (unsigned int)tp->rx_ring[i].buffer2,
                   buf[0], buf[1], buf[2]);
            for (j = 0; buf[j] != 0xee && j < 1600; j++)
                if (j < 100) printk(" %2.2x", buf[j]);
            printk(" j=%d.\n", j);
        }
        printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
        printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
        printk("\n");
    }
#endif

    /* Stop and restart the chip's Tx/Rx processes. */
    outl_CSR6(tp->csr6 | EnableRx, ioaddr);
    outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
    /* Trigger an immediate transmit demand. */
    outl(0, ioaddr + CSR1);

    dev->trans_start = jiffies;
    netif_wake_queue(dev);
    tp->stats.tx_errors++;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void xircom_init_ring(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int i;

    tp->tx_full = 0;
    tp->cur_rx = tp->cur_tx = 0;
    tp->dirty_rx = tp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        tp->rx_ring[i].status = 0;
        tp->rx_ring[i].length = PKT_BUF_SZ;
        tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
        tp->rx_skbuff[i] = NULL;
    }
    /* Mark the last entry as wrapping the ring. */
    tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
    tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

    for (i = 0; i < RX_RING_SIZE; i++) {
        /* Note the receive buffer must be longword aligned.
           dev_alloc_skb() provides 16 byte alignment.  But do *not*
           use skb_reserve() to align the IP header! */
        struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
        tp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;                         /* Mark as being used by this device. */
        tp->rx_ring[i].status = Rx0DescOwned;   /* Owned by Xircom chip */
        tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
    }
    tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
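    /* If an allocation failed above, i < RX_RING_SIZE here and the
       unsigned subtraction leaves dirty_rx "negative", so that
       cur_rx - dirty_rx in xircom_rx()'s refill loop counts exactly
       the slots still missing an skbuff. */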

    /* The Tx buffer descriptor is filled in as needed, but we
       do need to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_skbuff[i] = NULL;
        tp->tx_ring[i].status = 0;
        tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
        if (tp->chip_id == X3201_3)
            tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
#endif /* CARDBUS */
    }
    tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}


static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry;
    u32 flag;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = tp->cur_tx % TX_RING_SIZE;

    tp->tx_skbuff[entry] = skb;
#ifdef CARDBUS
    if (tp->chip_id == X3201_3) {
        memcpy(tp->tx_aligned_skbuff[entry]->data, skb->data, skb->len);
        tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
    } else
#endif
        tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

    if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {   /* Typical path */
        flag = Tx1WholePkt;                 /* No interrupt */
    } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
        flag = Tx1WholePkt | Tx1ComplIntr;  /* Tx-done intr. */
    } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
        flag = Tx1WholePkt;                 /* No Tx-done intr. */
    } else {
        /* Leave room for set_rx_mode() to fill entries. */
        flag = Tx1WholePkt | Tx1ComplIntr;  /* Tx-done intr. */
        tp->tx_full = 1;
    }
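    /* The chain above is simple Tx interrupt mitigation: a Tx-done
       interrupt is requested only at the ring's half-way point and when
       it is about to fill, so completions are normally reaped in batches
       rather than one interrupt per packet. */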
    if (entry == TX_RING_SIZE - 1)
        flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

    tp->tx_ring[entry].length = skb->len | flag;
    tp->tx_ring[entry].status = Tx0DescOwned;   /* Pass ownership to the chip. */
    tp->cur_tx++;
    if (tp->tx_full)
        netif_stop_queue(dev);
    else
        netif_wake_queue(dev);

    /* Trigger an immediate transmit demand. */
    outl(0, dev->base_addr + CSR1);

    dev->trans_start = jiffies;

    return 0;
}


static void xircom_media_change(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    u16 reg0, reg1, reg4, reg5;
    u32 csr6 = inl(ioaddr + CSR6), newcsr6;

    /* reset status first */
    mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_read(dev, tp->phys[0], MII_BMSR);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);

    if (reg1 & BMSR_LSTATUS) {
        /* link is up */
        if (reg0 & BMCR_ANENABLE) {
            /* autonegotiation is enabled */
            reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
            reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
            if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
                tp->speed100 = 1;
                tp->full_duplex = 1;
            } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
                tp->speed100 = 1;
                tp->full_duplex = 0;
            } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
                tp->speed100 = 0;
                tp->full_duplex = 1;
            } else {
                tp->speed100 = 0;
                tp->full_duplex = 0;
            }
        } else {
            /* autonegotiation is disabled */
            if (reg0 & BMCR_SPEED100)
                tp->speed100 = 1;
            else
                tp->speed100 = 0;
            if (reg0 & BMCR_FULLDPLX)
                tp->full_duplex = 1;
            else
                tp->full_duplex = 0;
        }
        printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
        netif_carrier_on(dev);
        newcsr6 = csr6 & ~FullDuplexBit;
        if (tp->full_duplex)
            newcsr6 |= FullDuplexBit;
        if (newcsr6 != csr6)
            outl_CSR6(newcsr6, ioaddr);     /* outl_CSR6() adds the CSR6 offset itself */
    } else {
        printk(KERN_DEBUG "%s: Link is down\n", dev->name);
        netif_carrier_off(dev);
    }
}


static void check_duplex(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 reg0;

    mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
    udelay(500);
    while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

    if (tp->autoneg) {
        reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
        reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
    } else {
        reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
        if (tp->speed100)
            reg0 |= BMCR_SPEED100;
        if (tp->full_duplex)
            reg0 |= BMCR_FULLDPLX;
        printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
    }
    mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = dev_instance;
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    int csr5, work_budget = max_interrupt_work;
    int handled = 0;

    spin_lock(&tp->lock);

    do {
        csr5 = inl(ioaddr + CSR5);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (xircom_debug > 4)
            printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, inl(dev->base_addr + CSR5));

        if (csr5 == 0xffffffff)
            break;      /* all bits set, assume PCMCIA card removed */

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
            break;

        handled = 1;

        if (csr5 & (RxIntr | RxNoBuf))
            work_budget -= xircom_rx(dev);

        if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
            unsigned int dirty_tx;

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = tp->tx_ring[entry].status;

                if (status < 0)
                    break;      /* It still hasn't been Txed */
                /* Check for Rx filter setup frames. */
                if (tp->tx_skbuff[entry] == NULL)
                    continue;

                if (status & Tx0DescError) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (xircom_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & Tx0ManyColl) {
                        tp->stats.tx_aborted_errors++;
                    }
                    if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
                    if (status & Tx0LateColl) tp->stats.tx_window_errors++;
                    if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
                } else {
                    tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_skbuff[entry]);
                tp->tx_skbuff[entry] = NULL;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->tx_full &&
                tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                /* The ring is no longer full */
                tp->tx_full = 0;

            if (tp->tx_full)
                netif_stop_queue(dev);
            else
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (xircom_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
            if (csr5 & LinkChange)
                xircom_media_change(dev);
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & TxThreshMask) != TxThreshMask)
                    tp->csr6 += (1 << TxThreshShift);   /* Bump up the Tx threshold */
                else
                    tp->csr6 |= TxStoreForw;            /* Store-n-forward. */
                /* Restart the transmit process. */
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            if (csr5 & RxDied) {        /* Missed a Rx frame. */
                tp->stats.rx_errors++;
                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            /* Clear all error sources, including undocumented ones! */
            outl(0x0800f7ba, ioaddr + CSR5);
        }
        if (--work_budget < 0) {
            if (xircom_debug > 1)
                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                       "csr5=0x%8.8x.\n", dev->name, csr5);
            /* Acknowledge all interrupt sources. */
            outl(0x8001ffff, ioaddr + CSR5);
            break;
        }
    } while (1);

    if (xircom_debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
               dev->name, inl(ioaddr + CSR5));

    spin_unlock(&tp->lock);
    return IRQ_RETVAL(handled);
}


static int
xircom_rx(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int work_done = 0;

    if (xircom_debug > 4)
        printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
               tp->rx_ring[entry].status);
    /* If we own the next entry, it's a new packet.  Send it up. */
    while (tp->rx_ring[entry].status >= 0) {
        s32 status = tp->rx_ring[entry].status;

        if (xircom_debug > 5)
            printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
                   tp->rx_ring[entry].status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (xircom_debug > 1)
                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                               "spanned multiple buffers, status %8.8x!\n",
                               dev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & Rx0DescError) {
                /* There was a fatal error. */
                if (xircom_debug > 2)
                    printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, status);
                tp->stats.rx_errors++;  /* end of a packet. */
                if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
                if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct sk_buff *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                       dev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
#if ! defined(__alpha__)
                eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
                                 pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
                work_done++;
            } else {    /* Pass up the skb already on the Rx ring. */
                skb_put(skb = tp->rx_skbuff[entry], pkt_len);
                tp->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
            if (skb == NULL)
                break;
            skb->dev = dev;     /* Mark as being used by this device. */
            tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
            work_done++;
        }
        tp->rx_ring[entry].status = Rx0DescOwned;
    }

    return work_done;
}


static void
xircom_down(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct xircom_private *tp = netdev_priv(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    outl(0, ioaddr + CSR7);
    /* Stop the chip's Tx and Rx processes. */
    outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);

    if (inl(ioaddr + CSR6) != 0xffffffff)
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

    dev->if_port = tp->saved_if_port;
}


static int
xircom_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct xircom_private *tp = netdev_priv(dev);
    int i;

    if (xircom_debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inl(ioaddr + CSR5));

    netif_stop_queue(dev);

    if (netif_device_present(dev))
        xircom_down(dev);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = tp->rx_skbuff[i];
        tp->rx_skbuff[i] = NULL;
        tp->rx_ring[i].status = 0;              /* Not owned by Xircom chip. */
        tp->rx_ring[i].length = 0;
        tp->rx_ring[i].buffer1 = 0xBADF00D0;    /* An invalid address. */
        if (skb) {
            dev_kfree_skb(skb);
        }
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (tp->tx_skbuff[i])
            dev_kfree_skb(tp->tx_skbuff[i]);
        tp->tx_skbuff[i] = NULL;
    }

    tp->open = 0;
    return 0;
}


static struct net_device_stats *xircom_get_stats(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    if (netif_device_present(dev))
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

    return &tp->stats;
}

static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    ecmd->supported =
        SUPPORTED_10baseT_Half |
        SUPPORTED_10baseT_Full |
        SUPPORTED_100baseT_Half |
        SUPPORTED_100baseT_Full |
        SUPPORTED_Autoneg |
        SUPPORTED_MII;

    ecmd->advertising = ADVERTISED_MII;
    if (tp->advertising[0] & ADVERTISE_10HALF)
        ecmd->advertising |= ADVERTISED_10baseT_Half;
    if (tp->advertising[0] & ADVERTISE_10FULL)
        ecmd->advertising |= ADVERTISED_10baseT_Full;
    if (tp->advertising[0] & ADVERTISE_100HALF)
        ecmd->advertising |= ADVERTISED_100baseT_Half;
    if (tp->advertising[0] & ADVERTISE_100FULL)
        ecmd->advertising |= ADVERTISED_100baseT_Full;
    if (tp->autoneg) {
        ecmd->advertising |= ADVERTISED_Autoneg;
        ecmd->autoneg = AUTONEG_ENABLE;
    } else
        ecmd->autoneg = AUTONEG_DISABLE;

    ecmd->port = PORT_MII;
    ecmd->transceiver = XCVR_INTERNAL;
    ecmd->phy_address = tp->phys[0];
    ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
    ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
    ecmd->maxtxpkt = TX_RING_SIZE / 2;
    ecmd->maxrxpkt = 0;
    return 0;
}

static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 autoneg, speed100, full_duplex;

    autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
    speed100 = (ecmd->speed == SPEED_100);
    full_duplex = (ecmd->duplex == DUPLEX_FULL);

    tp->autoneg = autoneg;
    if (speed100 != tp->speed100 ||
        full_duplex != tp->full_duplex) {
        tp->speed100 = speed100;
        tp->full_duplex = full_duplex;
        /* change advertising bits */
        tp->advertising[0] &= ~(ADVERTISE_10HALF |
                                ADVERTISE_10FULL |
                                ADVERTISE_100HALF |
                                ADVERTISE_100FULL |
                                ADVERTISE_100BASE4);
        if (speed100) {
            if (full_duplex)
                tp->advertising[0] |= ADVERTISE_100FULL;
            else
                tp->advertising[0] |= ADVERTISE_100HALF;
        } else {
            if (full_duplex)
                tp->advertising[0] |= ADVERTISE_10FULL;
            else
                tp->advertising[0] |= ADVERTISE_10HALF;
        }
    }
    check_duplex(dev);
    return 0;
}

static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct xircom_private *tp = netdev_priv(dev);
    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->bus_info, pci_name(tp->pdev));
}

static struct ethtool_ops ops = {
    .get_settings = xircom_get_settings,
    .set_settings = xircom_set_settings,
    .get_drvinfo = xircom_get_drvinfo,
};

/* Provide ioctl() calls to examine the MII xcvr state. */
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 *data = (u16 *)&rq->ifr_ifru;
    int phy = tp->phys[0] & 0x1f;
    unsigned long flags;

    switch (cmd) {
    /* Legacy mii-diag interface */
    case SIOCGMIIPHY:           /* Get address of MII PHY in use. */
        if (tp->mii_cnt)
            data[0] = phy;
        else
            return -ENODEV;
        return 0;
    case SIOCGMIIREG:           /* Read MII PHY register. */
        save_flags(flags);
        cli();
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
        restore_flags(flags);
        return 0;
    case SIOCSMIIREG:           /* Write MII PHY register. */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        save_flags(flags);
        cli();
        if (data[0] == tp->phys[0]) {
            u16 value = data[2];
            switch (data[1]) {
            case 0:
                if (value & (BMCR_RESET | BMCR_ANENABLE))
                    /* Autonegotiation. */
                    tp->autoneg = 1;
                else {
                    tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
                    tp->autoneg = 0;
                }
                break;
            case 4:
                tp->advertising[0] = value;
                break;
            }
            check_duplex(dev);
        }
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        restore_flags(flags);
        return 0;
    default:
        return -EOPNOTSUPP;
    }

    return -EOPNOTSUPP;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */
static void set_rx_mode(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    struct dev_mc_list *mclist;
    long ioaddr = dev->base_addr;
    int csr6 = inl(ioaddr + CSR6);
    u16 *eaddrs, *setup_frm;
    u32 tx_flags;
    int i;

    tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
    csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
    if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
        tp->csr6 |= PromiscBit;
        csr6 |= PromiscBit;
        goto out;
    }

    if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter well -- accept all multicasts. */
        tp->csr6 |= AllMultiBit;
        csr6 |= AllMultiBit;
        goto out;
    }

    tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;

    /* Note that only the low-address shortword of setup_frame is valid! */
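    /* The perfect-filter setup frame holds 16 entries of 12 bytes: each
       16-bit half of a MAC address sits in the low shortword of its own
       32-bit slot, hence the "setup_frm += 2" after every u16 stored
       below (PKT_SETUP_SZ == 16 * 12 == 192). */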
    setup_frm = tp->setup_frame;
    mclist = dev->mc_list;

    /* Fill the first entry with our physical address. */
    eaddrs = (u16 *)dev->dev_addr;
    *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
    *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
    *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;

    if (dev->mc_count > 14) {       /* Must use a multicast hash table. */
        u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
        u32 hash, hash2;

        tx_flags |= Tx1HashSetup;
        tp->csr6 |= HashFilterBit;
        csr6 |= HashFilterBit;

        /* Fill the unused 3 entries with the broadcast address.
           At least one entry *must* contain the broadcast address!!! */
        for (i = 0; i < 3; i++) {
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
        }

        /* Truly brain-damaged hash filter layout */
        /* XXX: not sure if I should take the last or the first 9 bits */
        for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
            u32 *hptr;
            hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
            if (hash < 384) {
                hash2 = hash + ((hash >> 4) << 4) +
                        ((hash >> 5) << 5);
            } else {
                hash -= 384;
                hash2 = 64 + hash + (hash >> 4) * 80;
            }
            hptr = &hash_table[hash2 & ~0x1f];
            *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
        }
    } else {
        /* We have <= 14 mcast addresses so we can use Xircom's
           wonderful 16-address perfect filter. */
        for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
            eaddrs = (u16 *)mclist->dmi_addr;
            *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
            *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
            *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
        }
        /* Fill the unused entries with the broadcast address.
           At least one entry *must* contain the broadcast address!!! */
        for (; i < 15; i++) {
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
        }
    }

    /* Now add this frame to the Tx list. */
    if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
        /* Same setup recently queued, we need not add it. */
        /* XXX: Huh?  All it means is that the Tx list is full... */
    } else {
        unsigned long flags;
        unsigned int entry;
        int dummy = -1;

        save_flags(flags); cli();
        entry = tp->cur_tx++ % TX_RING_SIZE;

        if (entry != 0) {
            /* Avoid a chip erratum by prefixing a dummy entry. */
            tp->tx_skbuff[entry] = NULL;
            tp->tx_ring[entry].length =
                (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
            tp->tx_ring[entry].buffer1 = 0;
            /* race with chip, set Tx0DescOwned later */
            dummy = entry;
            entry = tp->cur_tx++ % TX_RING_SIZE;
        }

        tp->tx_skbuff[entry] = NULL;
        /* Put the setup frame on the Tx list. */
        if (entry == TX_RING_SIZE - 1)
            tx_flags |= Tx1RingWrap;    /* Wrap ring. */
        tp->tx_ring[entry].length = tx_flags;
        tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
        tp->tx_ring[entry].status = Tx0DescOwned;
        if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
            tp->tx_full = 1;
            netif_stop_queue(dev);
        }
        if (dummy >= 0)
            tp->tx_ring[dummy].status = Tx0DescOwned;
        restore_flags(flags);
        /* Trigger an immediate transmit demand. */
        outl(0, ioaddr + CSR1);
    }

out:
    outl_CSR6(csr6, ioaddr);
}


static struct pci_device_id xircom_pci_table[] = {
    { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
    { 0, },
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);


#ifdef CONFIG_PM
static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct xircom_private *tp = netdev_priv(dev);
    printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
    if (tp->open)
        xircom_down(dev);

    pci_save_state(pdev);
    pci_disable_device(pdev);
    pci_set_power_state(pdev, 3);   /* D3hot */

    return 0;
}


static int xircom_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct xircom_private *tp = netdev_priv(dev);
    printk(KERN_INFO "xircom_resume(%s)\n", dev->name);

    pci_set_power_state(pdev, 0);
    pci_enable_device(pdev);
    pci_restore_state(pdev);

    /* Bring the chip out of sleep mode.
       Caution: Snooze mode does not work with some boards! */
    if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
        pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);

    transceiver_voodoo(dev);
    if (xircom_tbl[tp->chip_id].flags & HAS_MII)
        check_duplex(dev);

    if (tp->open)
        xircom_up(dev);
    return 0;
}
#endif /* CONFIG_PM */


static void __devexit xircom_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
    unregister_netdev(dev);
    pci_release_regions(pdev);
    free_netdev(dev);
    pci_set_drvdata(pdev, NULL);
}


static struct pci_driver xircom_driver = {
    .name       = DRV_NAME,
    .id_table   = xircom_pci_table,
    .probe      = xircom_init_one,
    .remove     = __devexit_p(xircom_remove_one),
#ifdef CONFIG_PM
    .suspend    = xircom_suspend,
    .resume     = xircom_resume
#endif /* CONFIG_PM */
};


static int __init xircom_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
    printk(version);
#endif
    return pci_module_init(&xircom_driver);
}


static void __exit xircom_exit(void)
{
    pci_unregister_driver(&xircom_driver);
}

module_init(xircom_init)
module_exit(xircom_exit)

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */