1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
2
3/*
4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10
11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
14
15*/
16
17#include <linux/config.h>
18
19#define DRV_NAME "tulip"
20#ifdef CONFIG_TULIP_NAPI
21#define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */
22#else
23#define DRV_VERSION "1.1.13"
24#endif
25#define DRV_RELDATE "May 11, 2002"
26
27
28#include <linux/module.h>
29#include <linux/pci.h>
30#include "tulip.h"
31#include <linux/init.h>
32#include <linux/etherdevice.h>
33#include <linux/delay.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/crc32.h>
37#include <asm/unaligned.h>
38#include <asm/uaccess.h>
39
40#ifdef __sparc__
41#include <asm/pbm.h>
42#endif
43
44static char version[] __devinitdata =
45 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
46
47
48/* A few user-configurable values. */
49
50/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
51static unsigned int max_interrupt_work = 25;
52
53#define MAX_UNITS 8
54/* Used to pass the full-duplex flag, etc. */
55static int full_duplex[MAX_UNITS];
56static int options[MAX_UNITS];
57static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
58
59/* The possible media types that can be set in options[] are: */
60const char * const medianame[32] = {
61 "10baseT", "10base2", "AUI", "100baseTx",
62 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
63 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
64 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
65 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
66 "","","","", "","","","", "","","","Transceiver reset",
67};
68
69/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
70#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
71 || defined(__sparc__) || defined(__ia64__) \
72 || defined(__sh__) || defined(__mips__)
73static int rx_copybreak = 1518;
74#else
75static int rx_copybreak = 100;
76#endif
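/* Note: rx_copybreak is copied into tulip_rx_copybreak in tulip_init()
   below and consumed by the Rx path (not in this file): frames no longer
   than this are copied into a freshly allocated skb so the full-size DMA
   buffer can go straight back into the Rx ring.  1518, a maximal Ethernet
   frame, effectively means "always copy". */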
77
78/*
79 Set the bus performance register.
80 Typical: Set 16 longword cache alignment, no burst limit.
81 Cache alignment bits 15:14 Burst length 13:8
82 0000 No alignment 0x00000000 unlimited 0800 8 longwords
83 4000 8 longwords 0100 1 longword 1000 16 longwords
84 8000 16 longwords 0200 2 longwords 2000 32 longwords
85 C000 32 longwords 0400 4 longwords
86 Warning: many older 486 systems are broken and require setting 0x00A04800
87 8 longword cache alignment, 8 longword burst.
88 ToDo: Non-Intel setting could be better.
89*/
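/* Worked example, using only the table above: the x86/PowerPC default
   below is 0x01A00000 | 0x8000, where 0x8000 in bits 15:14 selects
   16-longword cache alignment and bits 13:8 = 0 leave the burst length
   unlimited.  The Alpha/IA64 default ORs in 0xE000 instead: 0xC000 gives
   32-longword alignment and 0x2000 gives 32-longword bursts.  The high
   0x01A00000 bits are outside the fields described by this table. */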
90
91#if defined(__alpha__) || defined(__ia64__)
92static int csr0 = 0x01A00000 | 0xE000;
93#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
94static int csr0 = 0x01A00000 | 0x8000;
95#elif defined(__sparc__) || defined(__hppa__)
96/* The UltraSparc PCI controllers will disconnect at every 64-byte
97 * crossing anyways so it makes no sense to tell Tulip to burst
98 * any more than that.
99 */
100static int csr0 = 0x01A00000 | 0x9000;
101#elif defined(__arm__) || defined(__sh__)
102static int csr0 = 0x01A00000 | 0x4800;
103#elif defined(__mips__)
104static int csr0 = 0x00200000 | 0x4000;
105#else
106#warning Processor architecture undefined!
107static int csr0 = 0x00A00000 | 0x4800;
108#endif
109
110/* Operational parameters that usually are not changed. */
111/* Time in jiffies before concluding the transmitter is hung. */
112#define TX_TIMEOUT (4*HZ)
113
114
115MODULE_AUTHOR("The Linux Kernel Team");
116MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
117MODULE_LICENSE("GPL");
118MODULE_VERSION(DRV_VERSION);
119module_param(tulip_debug, int, 0);
120module_param(max_interrupt_work, int, 0);
121module_param(rx_copybreak, int, 0);
122module_param(csr0, int, 0);
123module_param_array(options, int, NULL, 0);
124module_param_array(full_duplex, int, NULL, 0);
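/* Typical usage (sketch, assuming the standard modprobe syntax): loading
   with "modprobe tulip options=11 full_duplex=1" asks tulip_init_one()
   below to force the first board to medianame[11] ("MII") and full duplex;
   the per-board arrays take one comma-separated value per card, up to
   MAX_UNITS. */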
125
126#define PFX DRV_NAME ": "
127
128#ifdef TULIP_DEBUG
129int tulip_debug = TULIP_DEBUG;
130#else
131int tulip_debug = 1;
132#endif
133
134
135
136/*
137 * This table is used during operation for chip capabilities and the media timer.
138 *
139 * It is indexed via the values in 'enum chips'
140 */
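/* Example of the indexing: tulip_pci_tbl[] below carries the 'enum chips'
   value in its driver_data field, tulip_init_one() stores it in
   tp->chip_id, and runtime code then does lookups such as
   tulip_tbl[tp->chip_id].valid_intrs. */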
141
142struct tulip_chip_table tulip_tbl[] = {
143 { }, /* placeholder for array, slot unused currently */
144 { }, /* placeholder for array, slot unused currently */
145
146 /* DC21140 */
147 { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
149
150 /* DC21142, DC21143 */
151 { "Digital DS21143 Tulip", 128, 0x0801fbff,
152 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
154
155 /* LC82C168 */
156 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 HAS_MII | HAS_PNICNWAY, pnic_timer },
158
159 /* MX98713 */
160 { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
162
163 /* MX98715 */
164 { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 HAS_MEDIA_TABLE, mxic_timer },
166
167 /* MX98725 */
168 { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 HAS_MEDIA_TABLE, mxic_timer },
170
171 /* AX88140 */
172 { "ASIX AX88140", 128, 0x0001fbff,
173 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 | IS_ASIX, tulip_timer },
175
176 /* PNIC2 */
177 { "Lite-On PNIC-II", 256, 0x0801fbff,
178 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
179
180 /* COMET */
181 { "ADMtek Comet", 256, 0x0001abef,
182 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
183
184 /* COMPEX9881 */
185 { "Compex 9881 PMAC", 128, 0x0001ebef,
186 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
187
188 /* I21145 */
189 { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
192
193 /* DM910X */
194 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
195 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
196 tulip_timer },
197
198 /* RS7112 */
199 { "Conexant LANfinity", 256, 0x0001ebef,
200 HAS_MII | HAS_ACPI, tulip_timer },
201
202 /* ULi526X */
203 { "ULi M5261/M5263", 128, 0x0001ebef,
204 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
205};
206
207
208static struct pci_device_id tulip_pci_tbl[] = {
209 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
210 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
211 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
212 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
213 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
214/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
215 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
216 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
217 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
218 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
228 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
229 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
230 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
231 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
233 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
234 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
235 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
238 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
243 { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
244 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
245 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
246 { } /* terminate list */
247};
248MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
249
250
251/* A full-duplex map for media types. */
252const char tulip_media_cap[32] =
253{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
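/* Indexed by the same media codes as medianame[] above; each entry is a
   bit mask tested elsewhere in this driver with MediaIsMII, MediaIsFD,
   MediaIs100 and MediaAlwaysFD (the flag values themselves live in
   tulip.h). */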
254
255static void tulip_tx_timeout(struct net_device *dev);
256static void tulip_init_ring(struct net_device *dev);
257static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
258static int tulip_open(struct net_device *dev);
259static int tulip_close(struct net_device *dev);
260static void tulip_up(struct net_device *dev);
261static void tulip_down(struct net_device *dev);
262static struct net_device_stats *tulip_get_stats(struct net_device *dev);
263static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
264static void set_rx_mode(struct net_device *dev);
265#ifdef CONFIG_NET_POLL_CONTROLLER
266static void poll_tulip(struct net_device *dev);
267#endif
268
269static void tulip_set_power_state (struct tulip_private *tp,
270 int sleep, int snooze)
271{
272 if (tp->flags & HAS_ACPI) {
273 u32 tmp, newtmp;
274 pci_read_config_dword (tp->pdev, CFDD, &tmp);
275 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
276 if (sleep)
277 newtmp |= CFDD_Sleep;
278 else if (snooze)
279 newtmp |= CFDD_Snooze;
280 if (tmp != newtmp)
281 pci_write_config_dword (tp->pdev, CFDD, newtmp);
282 }
283
284}
285
286
287static void tulip_up(struct net_device *dev)
288{
289 struct tulip_private *tp = netdev_priv(dev);
290 void __iomem *ioaddr = tp->base_addr;
291 int next_tick = 3*HZ;
292 int i;
293
294 /* Wake the chip from sleep/snooze mode. */
295 tulip_set_power_state (tp, 0, 0);
296
297 /* On some chip revs we must set the MII/SYM port before the reset!? */
298 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
299 iowrite32(0x00040000, ioaddr + CSR6);
300
301 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
302 iowrite32(0x00000001, ioaddr + CSR0);
303 udelay(100);
304
305 /* Deassert reset.
306 Wait the specified 50 PCI cycles after a reset by initializing
307 Tx and Rx queues and the address filter list. */
308 iowrite32(tp->csr0, ioaddr + CSR0);
309 udelay(100);
310
311 if (tulip_debug > 1)
312 printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
313
314 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
315 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
316 tp->cur_rx = tp->cur_tx = 0;
317 tp->dirty_rx = tp->dirty_tx = 0;
318
319 if (tp->flags & MC_HASH_ONLY) {
320 u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr));
321 u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4)));
322 if (tp->chip_id == AX88140) {
323 iowrite32(0, ioaddr + CSR13);
324 iowrite32(addr_low, ioaddr + CSR14);
325 iowrite32(1, ioaddr + CSR13);
326 iowrite32(addr_high, ioaddr + CSR14);
327 } else if (tp->flags & COMET_MAC_ADDR) {
328 iowrite32(addr_low, ioaddr + 0xA4);
329 iowrite32(addr_high, ioaddr + 0xA8);
330 iowrite32(0, ioaddr + 0xAC);
331 iowrite32(0, ioaddr + 0xB0);
332 }
333 } else {
334 /* This is set_rx_mode(), but without starting the transmitter. */
335 u16 *eaddrs = (u16 *)dev->dev_addr;
336 u16 *setup_frm = &tp->setup_frame[15*6];
337 dma_addr_t mapping;
338
339 /* 21140 bug: you must add the broadcast address. */
340 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
341 /* Fill the final entry of the table with our physical address. */
342 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
343 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
344 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
345
346 mapping = pci_map_single(tp->pdev, tp->setup_frame,
347 sizeof(tp->setup_frame),
348 PCI_DMA_TODEVICE);
349 tp->tx_buffers[tp->cur_tx].skb = NULL;
350 tp->tx_buffers[tp->cur_tx].mapping = mapping;
351
352 /* Put the setup frame on the Tx list. */
353 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
354 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
355 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
356
357 tp->cur_tx++;
358 }
359
360 tp->saved_if_port = dev->if_port;
361 if (dev->if_port == 0)
362 dev->if_port = tp->default_port;
363
364 /* Allow selecting a default media. */
365 i = 0;
366 if (tp->mtable == NULL)
367 goto media_picked;
368 if (dev->if_port) {
369 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
370 (dev->if_port == 12 ? 0 : dev->if_port);
371 for (i = 0; i < tp->mtable->leafcount; i++)
372 if (tp->mtable->mleaf[i].media == looking_for) {
373 printk(KERN_INFO "%s: Using user-specified media %s.\n",
374 dev->name, medianame[dev->if_port]);
375 goto media_picked;
376 }
377 }
378 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
379 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
380 for (i = 0; i < tp->mtable->leafcount; i++)
381 if (tp->mtable->mleaf[i].media == looking_for) {
382 printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
383 dev->name, medianame[looking_for]);
384 goto media_picked;
385 }
386 }
387 /* Start sensing first non-full-duplex media. */
388 for (i = tp->mtable->leafcount - 1;
389 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
390 ;
391media_picked:
392
393 tp->csr6 = 0;
394 tp->cur_index = i;
395 tp->nwayset = 0;
396
397 if (dev->if_port) {
398 if (tp->chip_id == DC21143 &&
399 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
400 /* We must reset the media CSRs when we force-select MII mode. */
401 iowrite32(0x0000, ioaddr + CSR13);
402 iowrite32(0x0000, ioaddr + CSR14);
403 iowrite32(0x0008, ioaddr + CSR15);
404 }
405 tulip_select_media(dev, 1);
406 } else if (tp->chip_id == DC21142) {
407 if (tp->mii_cnt) {
408 tulip_select_media(dev, 1);
409 if (tulip_debug > 1)
410 printk(KERN_INFO "%s: Using MII transceiver %d, status "
411 "%4.4x.\n",
412 dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
413 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
414 tp->csr6 = csr6_mask_hdcap;
415 dev->if_port = 11;
416 iowrite32(0x0000, ioaddr + CSR13);
417 iowrite32(0x0000, ioaddr + CSR14);
418 } else
419 t21142_start_nway(dev);
420 } else if (tp->chip_id == PNIC2) {
421 /* for initial startup advertise 10/100 Full and Half */
422 tp->sym_advertise = 0x01E0;
423 /* enable autonegotiate end interrupt */
424 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
425 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
426 pnic2_start_nway(dev);
427 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
428 if (tp->mii_cnt) {
429 dev->if_port = 11;
430 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
431 iowrite32(0x0001, ioaddr + CSR15);
432 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
433 pnic_do_nway(dev);
434 else {
435 /* Start with 10mbps to do autonegotiation. */
436 iowrite32(0x32, ioaddr + CSR12);
437 tp->csr6 = 0x00420000;
438 iowrite32(0x0001B078, ioaddr + 0xB8);
439 iowrite32(0x0201B078, ioaddr + 0xB8);
440 next_tick = 1*HZ;
441 }
442 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
443 && ! tp->medialock) {
444 dev->if_port = 0;
445 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
446 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
447 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
448 /* Provided by BOLO, Macronix - 12/10/1998. */
449 dev->if_port = 0;
450 tp->csr6 = 0x01a80200;
451 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
452 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
453 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
454 /* Enable automatic Tx underrun recovery. */
455 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
456 dev->if_port = tp->mii_cnt ? 11 : 0;
457 tp->csr6 = 0x00040000;
458 } else if (tp->chip_id == AX88140) {
459 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
460 } else
461 tulip_select_media(dev, 1);
462
463 /* Start the chip's Tx to process setup frame. */
464 tulip_stop_rxtx(tp);
465 barrier();
466 udelay(5);
467 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
468
469 /* Enable interrupts by setting the interrupt mask. */
470 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
471 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
472 tulip_start_rxtx(tp);
473 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
474
475 if (tulip_debug > 2) {
476 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
477 dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
478 ioread32(ioaddr + CSR6));
479 }
480
481 /* Set the timer to check for link beat and perhaps switch
482 to an alternate media type. */
483 tp->timer.expires = RUN_AT(next_tick);
484 add_timer(&tp->timer);
485#ifdef CONFIG_TULIP_NAPI
486 init_timer(&tp->oom_timer);
487 tp->oom_timer.data = (unsigned long)dev;
488 tp->oom_timer.function = oom_timer;
489#endif
490}
491
492static int
493tulip_open(struct net_device *dev)
494{
495 int retval;
496
497 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
498 return retval;
499
500 tulip_init_ring (dev);
501
502 tulip_up (dev);
503
504 netif_start_queue (dev);
505
506 return 0;
507}
508
509
510static void tulip_tx_timeout(struct net_device *dev)
511{
512 struct tulip_private *tp = netdev_priv(dev);
513 void __iomem *ioaddr = tp->base_addr;
514 unsigned long flags;
515
516 spin_lock_irqsave (&tp->lock, flags);
517
518 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
519 /* Do nothing -- the media monitor should handle this. */
520 if (tulip_debug > 1)
521 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
522 dev->name);
523 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
524 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
525 || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
526 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
527 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
528 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
529 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
530 if ( ! tp->medialock && tp->mtable) {
531 do
532 --tp->cur_index;
533 while (tp->cur_index >= 0
534 && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
535 & MediaIsFD));
536 if (--tp->cur_index < 0) {
537 /* We start again, but should instead look for default. */
538 tp->cur_index = tp->mtable->leafcount - 1;
539 }
540 tulip_select_media(dev, 0);
541 printk(KERN_WARNING "%s: transmit timed out, switching to %s "
542 "media.\n", dev->name, medianame[dev->if_port]);
543 }
544 } else if (tp->chip_id == PNIC2) {
545 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
546 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
547 dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
548 (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
549 } else {
550 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
551 "%8.8x, resetting...\n",
552 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
553 dev->if_port = 0;
554 }
555
556#if defined(way_too_many_messages)
557 if (tulip_debug > 3) {
558 int i;
559 for (i = 0; i < RX_RING_SIZE; i++) {
560 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
561 int j;
562 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
563 "%2.2x %2.2x %2.2x.\n",
564 i, (unsigned int)tp->rx_ring[i].status,
565 (unsigned int)tp->rx_ring[i].length,
566 (unsigned int)tp->rx_ring[i].buffer1,
567 (unsigned int)tp->rx_ring[i].buffer2,
568 buf[0], buf[1], buf[2]);
569 for (j = 0; buf[j] != 0xee && j < 1600; j++)
570 if (j < 100) printk(" %2.2x", buf[j]);
571 printk(" j=%d.\n", j);
572 }
573 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
574 for (i = 0; i < RX_RING_SIZE; i++)
575 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
576 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
577 for (i = 0; i < TX_RING_SIZE; i++)
578 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
579 printk("\n");
580 }
581#endif
582
583 /* Stop and restart the chip's Tx processes. */
584
585 tulip_restart_rxtx(tp);
586 /* Trigger an immediate transmit demand. */
587 iowrite32(0, ioaddr + CSR1);
588
589 tp->stats.tx_errors++;
590
591 spin_unlock_irqrestore (&tp->lock, flags);
592 dev->trans_start = jiffies;
593 netif_wake_queue (dev);
594}
595
596
597/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
598static void tulip_init_ring(struct net_device *dev)
599{
600 struct tulip_private *tp = netdev_priv(dev);
601 int i;
602
603 tp->susp_rx = 0;
604 tp->ttimer = 0;
605 tp->nir = 0;
606
607 for (i = 0; i < RX_RING_SIZE; i++) {
608 tp->rx_ring[i].status = 0x00000000;
609 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
610 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
611 tp->rx_buffers[i].skb = NULL;
612 tp->rx_buffers[i].mapping = 0;
613 }
614 /* Mark the last entry as wrapping the ring. */
615 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
616 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
617
618 for (i = 0; i < RX_RING_SIZE; i++) {
619 dma_addr_t mapping;
620
621 /* Note the receive buffer must be longword aligned.
622 dev_alloc_skb() provides 16 byte alignment. But do *not*
623 use skb_reserve() to align the IP header! */
624 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
625 tp->rx_buffers[i].skb = skb;
626 if (skb == NULL)
627 break;
628 mapping = pci_map_single(tp->pdev, skb->tail,
629 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
630 tp->rx_buffers[i].mapping = mapping;
631 skb->dev = dev; /* Mark as being used by this device. */
632 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
633 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
634 }
635 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
636
637 /* The Tx buffer descriptor is filled in as needed, but we
638 do need to clear the ownership bit. */
639 for (i = 0; i < TX_RING_SIZE; i++) {
640 tp->tx_buffers[i].skb = NULL;
641 tp->tx_buffers[i].mapping = 0;
642 tp->tx_ring[i].status = 0x00000000;
643 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
644 }
645 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
646}
647
648static int
649tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
650{
651 struct tulip_private *tp = netdev_priv(dev);
652 int entry;
653 u32 flag;
654 dma_addr_t mapping;
655
656 spin_lock_irq(&tp->lock);
657
658 /* Calculate the next Tx descriptor entry. */
659 entry = tp->cur_tx % TX_RING_SIZE;
660
661 tp->tx_buffers[entry].skb = skb;
662 mapping = pci_map_single(tp->pdev, skb->data,
663 skb->len, PCI_DMA_TODEVICE);
664 tp->tx_buffers[entry].mapping = mapping;
665 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
666
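	/* The flag chosen below controls interrupt mitigation: a Tx-done
	   interrupt (0xe0000000) is requested only at the half-ring mark and
	   when the ring is nearly full, so the interrupt handler normally
	   reclaims completed descriptors in batches rather than once per
	   packet. */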
667 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
668 flag = 0x60000000; /* No interrupt */
669 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
670 flag = 0xe0000000; /* Tx-done intr. */
671 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
672 flag = 0x60000000; /* No Tx-done intr. */
673 } else { /* Leave room for set_rx_mode() to fill entries. */
674 flag = 0xe0000000; /* Tx-done intr. */
675 netif_stop_queue(dev);
676 }
677 if (entry == TX_RING_SIZE-1)
678 flag = 0xe0000000 | DESC_RING_WRAP;
679
680 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
681 /* if we were using Transmit Automatic Polling, we would need a
682 * wmb() here. */
683 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
684 wmb();
685
686 tp->cur_tx++;
687
688 /* Trigger an immediate transmit demand. */
689 iowrite32(0, tp->base_addr + CSR1);
690
691 spin_unlock_irq(&tp->lock);
692
693 dev->trans_start = jiffies;
694
695 return 0;
696}
697
698static void tulip_clean_tx_ring(struct tulip_private *tp)
699{
700 unsigned int dirty_tx;
701
702 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
703 dirty_tx++) {
704 int entry = dirty_tx % TX_RING_SIZE;
705 int status = le32_to_cpu(tp->tx_ring[entry].status);
706
707 if (status < 0) {
708 tp->stats.tx_errors++; /* It wasn't Txed */
709 tp->tx_ring[entry].status = 0;
710 }
711
712 /* Check for Tx filter setup frames. */
713 if (tp->tx_buffers[entry].skb == NULL) {
714 /* test because dummy frames not mapped */
715 if (tp->tx_buffers[entry].mapping)
716 pci_unmap_single(tp->pdev,
717 tp->tx_buffers[entry].mapping,
718 sizeof(tp->setup_frame),
719 PCI_DMA_TODEVICE);
720 continue;
721 }
722
723 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
724 tp->tx_buffers[entry].skb->len,
725 PCI_DMA_TODEVICE);
726
727 /* Free the original skb. */
728 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
729 tp->tx_buffers[entry].skb = NULL;
730 tp->tx_buffers[entry].mapping = 0;
731 }
732}
733
734static void tulip_down (struct net_device *dev)
735{
736 struct tulip_private *tp = netdev_priv(dev);
737 void __iomem *ioaddr = tp->base_addr;
738 unsigned long flags;
739
740 del_timer_sync (&tp->timer);
741#ifdef CONFIG_TULIP_NAPI
742 del_timer_sync (&tp->oom_timer);
743#endif
744 spin_lock_irqsave (&tp->lock, flags);
745
746 /* Disable interrupts by clearing the interrupt mask. */
747 iowrite32 (0x00000000, ioaddr + CSR7);
748
749 /* Stop the Tx and Rx processes. */
750 tulip_stop_rxtx(tp);
751
752 /* prepare receive buffers */
753 tulip_refill_rx(dev);
754
755 /* release any unconsumed transmit buffers */
756 tulip_clean_tx_ring(tp);
757
758 if (ioread32 (ioaddr + CSR6) != 0xffffffff)
759 tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
760
761 spin_unlock_irqrestore (&tp->lock, flags);
762
763 init_timer(&tp->timer);
764 tp->timer.data = (unsigned long)dev;
765 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
766
767 dev->if_port = tp->saved_if_port;
768
769 /* Leave the driver in snooze, not sleep, mode. */
770 tulip_set_power_state (tp, 0, 1);
771}
772
773
774static int tulip_close (struct net_device *dev)
775{
776 struct tulip_private *tp = netdev_priv(dev);
777 void __iomem *ioaddr = tp->base_addr;
778 int i;
779
780 netif_stop_queue (dev);
781
782 tulip_down (dev);
783
784 if (tulip_debug > 1)
785 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
786 dev->name, ioread32 (ioaddr + CSR5));
787
788 free_irq (dev->irq, dev);
789
790 /* Free all the skbuffs in the Rx queue. */
791 for (i = 0; i < RX_RING_SIZE; i++) {
792 struct sk_buff *skb = tp->rx_buffers[i].skb;
793 dma_addr_t mapping = tp->rx_buffers[i].mapping;
794
795 tp->rx_buffers[i].skb = NULL;
796 tp->rx_buffers[i].mapping = 0;
797
798 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
799 tp->rx_ring[i].length = 0;
800 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
801 if (skb) {
802 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
803 PCI_DMA_FROMDEVICE);
804 dev_kfree_skb (skb);
805 }
806 }
807 for (i = 0; i < TX_RING_SIZE; i++) {
808 struct sk_buff *skb = tp->tx_buffers[i].skb;
809
810 if (skb != NULL) {
811 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
812 skb->len, PCI_DMA_TODEVICE);
813 dev_kfree_skb (skb);
814 }
815 tp->tx_buffers[i].skb = NULL;
816 tp->tx_buffers[i].mapping = 0;
817 }
818
819 return 0;
820}
821
822static struct net_device_stats *tulip_get_stats(struct net_device *dev)
823{
824 struct tulip_private *tp = netdev_priv(dev);
825 void __iomem *ioaddr = tp->base_addr;
826
827 if (netif_running(dev)) {
828 unsigned long flags;
829
830 spin_lock_irqsave (&tp->lock, flags);
831
832 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
833
834 spin_unlock_irqrestore(&tp->lock, flags);
835 }
836
837 return &tp->stats;
838}
839
840
841static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
842{
843 struct tulip_private *np = netdev_priv(dev);
844 strcpy(info->driver, DRV_NAME);
845 strcpy(info->version, DRV_VERSION);
846 strcpy(info->bus_info, pci_name(np->pdev));
847}
848
849static struct ethtool_ops ops = {
850 .get_drvinfo = tulip_get_drvinfo
851};
852
853/* Provide ioctl() calls to examine the MII xcvr state. */
854static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
855{
856 struct tulip_private *tp = netdev_priv(dev);
857 void __iomem *ioaddr = tp->base_addr;
858 struct mii_ioctl_data *data = if_mii(rq);
859 const unsigned int phy_idx = 0;
860 int phy = tp->phys[phy_idx] & 0x1f;
861 unsigned int regnum = data->reg_num;
862
863 switch (cmd) {
864 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
865 if (tp->mii_cnt)
866 data->phy_id = phy;
867 else if (tp->flags & HAS_NWAY)
868 data->phy_id = 32;
869 else if (tp->chip_id == COMET)
870 data->phy_id = 1;
871 else
872 return -ENODEV;
873
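	/* No break above: control intentionally falls through so that a
	   SIOCGMIIPHY request also returns a register read, the usual MII
	   ioctl convention. */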
874 case SIOCGMIIREG: /* Read MII PHY register. */
875 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
876 int csr12 = ioread32 (ioaddr + CSR12);
877 int csr14 = ioread32 (ioaddr + CSR14);
878 switch (regnum) {
879 case 0:
880 if (((csr14<<5) & 0x1000) ||
881 (dev->if_port == 5 && tp->nwayset))
882 data->val_out = 0x1000;
883 else
884 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
885 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
886 break;
887 case 1:
888 data->val_out =
889 0x1848 +
890 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
891 ((csr12&0x06) == 6 ? 0 : 4);
892 data->val_out |= 0x6048;
893 break;
894 case 4:
895 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
896 data->val_out =
897 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
898 ((csr14 >> 1) & 0x20) + 1;
899 data->val_out |= ((csr14 >> 9) & 0x03C0);
900 break;
901 case 5: data->val_out = tp->lpar; break;
902 default: data->val_out = 0; break;
903 }
904 } else {
905 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
906 }
907 return 0;
908
909 case SIOCSMIIREG: /* Write MII PHY register. */
910 if (!capable (CAP_NET_ADMIN))
911 return -EPERM;
912 if (regnum & ~0x1f)
913 return -EINVAL;
914 if (data->phy_id == phy) {
915 u16 value = data->val_in;
916 switch (regnum) {
917 case 0: /* Check for autonegotiation on or reset. */
918 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
919 if (tp->full_duplex_lock)
920 tp->full_duplex = (value & 0x0100) ? 1 : 0;
921 break;
922 case 4:
923 tp->advertising[phy_idx] =
924 tp->mii_advertise = data->val_in;
925 break;
926 }
927 }
928 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
929 u16 value = data->val_in;
930 if (regnum == 0) {
931 if ((value & 0x1200) == 0x1200) {
932 if (tp->chip_id == PNIC2) {
933 pnic2_start_nway (dev);
934 } else {
935 t21142_start_nway (dev);
936 }
937 }
938 } else if (regnum == 4)
939 tp->sym_advertise = value;
940 } else {
941 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
942 }
943 return 0;
944 default:
945 return -EOPNOTSUPP;
946 }
947
948 return -EOPNOTSUPP;
949}
950
951
952/* Set or clear the multicast filter for this adaptor.
953 Note that we only use exclusion around actually queueing the
954 new frame, not around filling tp->setup_frame. This is non-deterministic
955 when re-entered but still correct. */
956
957#undef set_bit_le
958#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
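/* set_bit_le() sets bit i of a byte array using little-endian bit order
   (bit 0 is the LSB of byte 0).  In build_setup_frame_hash() below the
   array is u16 hash_table[32], i.e. a 512-bit table, so the 9-bit index
   ether_crc_le(...) & 0x1ff addresses exactly one bit and
   set_bit_le(255, hash_table) marks the broadcast entry. */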
959
960static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
961{
962 struct tulip_private *tp = netdev_priv(dev);
963 u16 hash_table[32];
964 struct dev_mc_list *mclist;
965 int i;
966 u16 *eaddrs;
967
968 memset(hash_table, 0, sizeof(hash_table));
969 set_bit_le(255, hash_table); /* Broadcast entry */
970 /* This should work on big-endian machines as well. */
971 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
972 i++, mclist = mclist->next) {
973 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
974
975 set_bit_le(index, hash_table);
976
977 }
978 for (i = 0; i < 32; i++) {
979 *setup_frm++ = hash_table[i];
980 *setup_frm++ = hash_table[i];
981 }
982 setup_frm = &tp->setup_frame[13*6];
983
984 /* Fill the final entry with our physical address. */
985 eaddrs = (u16 *)dev->dev_addr;
986 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
987 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
988 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
989}
990
991static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
992{
993 struct tulip_private *tp = netdev_priv(dev);
994 struct dev_mc_list *mclist;
995 int i;
996 u16 *eaddrs;
997
998 /* We have <= 14 addresses so we can use the wonderful
999 16 address perfect filtering of the Tulip. */
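	/* Layout: each of the 16 entries is 6 u16 words (every 16-bit chunk
	   of the address written twice), so the whole setup frame is
	   16 * 12 = 192 bytes; this matches the 192 used as the Tx length
	   when the setup frame is queued elsewhere in this file. */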
1000 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
1001 i++, mclist = mclist->next) {
1002 eaddrs = (u16 *)mclist->dmi_addr;
1003 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1004 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1005 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1006 }
1007 /* Fill the unused entries with the broadcast address. */
1008 memset(setup_frm, 0xff, (15-i)*12);
1009 setup_frm = &tp->setup_frame[15*6];
1010
1011 /* Fill the final entry with our physical address. */
1012 eaddrs = (u16 *)dev->dev_addr;
1013 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1014 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1015 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1016}
1017
1018
1019static void set_rx_mode(struct net_device *dev)
1020{
1021 struct tulip_private *tp = netdev_priv(dev);
1022 void __iomem *ioaddr = tp->base_addr;
1023 int csr6;
1024
1025 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1026
1027 tp->csr6 &= ~0x00D5;
1028 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1029 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1030 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1031 /* Unconditionally log net taps. */
1032 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1033 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1034 /* Too many to filter well -- accept all multicasts. */
1035 tp->csr6 |= AcceptAllMulticast;
1036 csr6 |= AcceptAllMulticast;
1037 } else if (tp->flags & MC_HASH_ONLY) {
1038 /* Some work-alikes have only a 64-entry hash filter table. */
1039 /* Should verify correctness on big-endian/__powerpc__ */
1040 struct dev_mc_list *mclist;
1041 int i;
1042 if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
1043 tp->csr6 |= AcceptAllMulticast;
1044 csr6 |= AcceptAllMulticast;
1045 } else {
1046 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1047 int filterbit;
1048 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1049 i++, mclist = mclist->next) {
1050 if (tp->flags & COMET_MAC_ADDR)
1051 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1052 else
1053 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1054 filterbit &= 0x3f;
1055 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1056 if (tulip_debug > 2) {
1057 printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
1058 "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
1059 mclist->dmi_addr[0], mclist->dmi_addr[1],
1060 mclist->dmi_addr[2], mclist->dmi_addr[3],
1061 mclist->dmi_addr[4], mclist->dmi_addr[5],
1062 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1063 }
1064 }
1065 if (mc_filter[0] == tp->mc_filter[0] &&
1066 mc_filter[1] == tp->mc_filter[1])
1067 ; /* No change. */
1068 else if (tp->flags & IS_ASIX) {
1069 iowrite32(2, ioaddr + CSR13);
1070 iowrite32(mc_filter[0], ioaddr + CSR14);
1071 iowrite32(3, ioaddr + CSR13);
1072 iowrite32(mc_filter[1], ioaddr + CSR14);
1073 } else if (tp->flags & COMET_MAC_ADDR) {
1074 iowrite32(mc_filter[0], ioaddr + 0xAC);
1075 iowrite32(mc_filter[1], ioaddr + 0xB0);
1076 }
1077 tp->mc_filter[0] = mc_filter[0];
1078 tp->mc_filter[1] = mc_filter[1];
1079 }
1080 } else {
1081 unsigned long flags;
1082 u32 tx_flags = 0x08000000 | 192;
1083
1084 /* Note that only the low-address shortword of setup_frame is valid!
1085 The values are doubled for big-endian architectures. */
1086 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1087 build_setup_frame_hash(tp->setup_frame, dev);
1088 tx_flags = 0x08400000 | 192;
1089 } else {
1090 build_setup_frame_perfect(tp->setup_frame, dev);
1091 }
1092
1093 spin_lock_irqsave(&tp->lock, flags);
1094
1095 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1096 /* Same setup recently queued, we need not add it. */
1097 } else {
1098 unsigned int entry;
1099 int dummy = -1;
1100
1101 /* Now add this frame to the Tx list. */
1102
1103 entry = tp->cur_tx++ % TX_RING_SIZE;
1104
1105 if (entry != 0) {
1106 /* Avoid a chip erratum by prefixing a dummy entry. Don't do
1107 this on the ULI526X as it triggers a different problem. */
1108 if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
1109 tp->tx_buffers[entry].skb = NULL;
1110 tp->tx_buffers[entry].mapping = 0;
1111 tp->tx_ring[entry].length =
1112 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1113 tp->tx_ring[entry].buffer1 = 0;
1114 /* Must set DescOwned later to avoid race with chip */
1115 dummy = entry;
1116 entry = tp->cur_tx++ % TX_RING_SIZE;
1117 }
1118 }
1119
1120 tp->tx_buffers[entry].skb = NULL;
1121 tp->tx_buffers[entry].mapping =
1122 pci_map_single(tp->pdev, tp->setup_frame,
1123 sizeof(tp->setup_frame),
1124 PCI_DMA_TODEVICE);
1125 /* Put the setup frame on the Tx list. */
1126 if (entry == TX_RING_SIZE-1)
1127 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1128 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1129 tp->tx_ring[entry].buffer1 =
1130 cpu_to_le32(tp->tx_buffers[entry].mapping);
1131 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1132 if (dummy >= 0)
1133 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1134 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1135 netif_stop_queue(dev);
1136
1137 /* Trigger an immediate transmit demand. */
1138 iowrite32(0, ioaddr + CSR1);
1139 }
1140
1141 spin_unlock_irqrestore(&tp->lock, flags);
1142 }
1143
1144 iowrite32(csr6, ioaddr + CSR6);
1145}
1146
1147#ifdef CONFIG_TULIP_MWI
1148static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1149 struct net_device *dev)
1150{
1151 struct tulip_private *tp = netdev_priv(dev);
1152 u8 cache;
1153 u16 pci_command;
1154 u32 csr0;
1155
1156 if (tulip_debug > 3)
1157 printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
1158
1159 tp->csr0 = csr0 = 0;
1160
1161 /* if we have any cache line size at all, we can do MRM */
1162 csr0 |= MRM;
1163
1164 /* ...and barring hardware bugs, MWI */
1165 if (!(tp->chip_id == DC21143 && tp->revision == 65))
1166 csr0 |= MWI;
1167
1168 /* set or disable MWI in the standard PCI command bit.
1169 * Check for the case where mwi is desired but not available
1170 */
1171 if (csr0 & MWI) pci_set_mwi(pdev);
1172 else pci_clear_mwi(pdev);
1173
1174 /* read result from hardware (in case bit refused to enable) */
1175 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1176 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1177 csr0 &= ~MWI;
1178
1179 /* if cache line size hardwired to zero, no MWI */
1180 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1181 if ((csr0 & MWI) && (cache == 0)) {
1182 csr0 &= ~MWI;
1183 pci_clear_mwi(pdev);
1184 }
1185
1186 /* assign per-cacheline-size cache alignment and
1187 * burst length values
1188 */
1189 switch (cache) {
1190 case 8:
1191 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1192 break;
1193 case 16:
1194 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1195 break;
1196 case 32:
1197 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1198 break;
1199 default:
1200 cache = 0;
1201 break;
1202 }
1203
1204 /* if we have a good cache line size, we by now have a good
1205 * csr0, so save it and exit
1206 */
1207 if (cache)
1208 goto out;
1209
1210 /* we don't have a good csr0 or cache line size, disable MWI */
1211 if (csr0 & MWI) {
1212 pci_clear_mwi(pdev);
1213 csr0 &= ~MWI;
1214 }
1215
1216 /* sane defaults for burst length and cache alignment
1217 * originally from de4x5 driver
1218 */
1219 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1220
1221out:
1222 tp->csr0 = csr0;
1223 if (tulip_debug > 2)
1224 printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
1225 pci_name(pdev), cache, csr0);
1226}
1227#endif
1228
1229/*
1230 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1231 * is the DM910X and the on chip ULi devices
1232 */
1233
1234static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1235{
1236 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1237 return 1;
1238 if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
1239 return 1;
1240 if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
1241 return 1;
1242 return 0;
1243}
1244
1245static int __devinit tulip_init_one (struct pci_dev *pdev,
1246 const struct pci_device_id *ent)
1247{
1248 struct tulip_private *tp;
1249 /* See note below on the multiport cards. */
1250 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1251 static struct pci_device_id early_486_chipsets[] = {
1252 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1253 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1254 { },
1255 };
1256 static int last_irq;
1257 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1258 u8 chip_rev;
1259 int i, irq;
1260 unsigned short sum;
1261 unsigned char *ee_data;
1262 struct net_device *dev;
1263 void __iomem *ioaddr;
1264 static int board_idx = -1;
1265 int chip_idx = ent->driver_data;
1266 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1267 unsigned int eeprom_missing = 0;
1268 unsigned int force_csr0 = 0;
1269
1270#ifndef MODULE
1271 static int did_version; /* Already printed version info. */
1272 if (tulip_debug > 0 && did_version++ == 0)
1273 printk (KERN_INFO "%s", version);
1274#endif
1275
1276 board_idx++;
1277
1278 /*
1279 * LAN Media boards wire a tulip chip to a WAN interface and need a very
1280 * different driver (the lmc driver).
1281 */
1282
1283 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1284 printk (KERN_ERR PFX "skipping LMC card.\n");
1285 return -ENODEV;
1286 }
1287
1288 /*
1289 * Early DM9100s need software CRC and the DMFE driver
1290 */
1291
1292 if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
1293 {
1294 u32 dev_rev;
1295 /* Read Chip revision */
1296 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
1297 if(dev_rev < 0x02000030)
1298 {
1299 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1300 return -ENODEV;
1301 }
1302 }
1303
1304 /*
1305 * Looks for early PCI chipsets where people report hangs
1306 * without the workarounds being on.
1307 */
1308
1309 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1310 aligned. Aries might need this too. The Saturn errata are not
1311 pretty reading but thankfully it's an old 486 chipset.
1312
1313 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1314 Saturn.
1315 */
1316
1317 if (pci_dev_present(early_486_chipsets)) {
1318 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1319 force_csr0 = 1;
1320 }
1321
1322 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1323 if (chip_idx == AX88140) {
1324 if ((csr0 & 0x3f00) == 0)
1325 csr0 |= 0x2000;
1326 }
1327
1328 /* PNIC doesn't have MWI/MRL/MRM... */
1329 if (chip_idx == LC82C168)
1330 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1331
1332 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1333 if (tulip_uli_dm_quirk(pdev)) {
1334 csr0 &= ~0x01f100ff;
1335#if defined(__sparc__)
1336 csr0 = (csr0 & ~0xff00) | 0xe000;
1337#endif
1338 }
1339 /*
1340 * And back to business
1341 */
1342
1343 i = pci_enable_device(pdev);
1344 if (i) {
1345 printk (KERN_ERR PFX
1346 "Cannot enable tulip board #%d, aborting\n",
1347 board_idx);
1348 return i;
1349 }
1350
1351 irq = pdev->irq;
1352
1353 /* alloc_etherdev ensures aligned and zeroed private structures */
1354 dev = alloc_etherdev (sizeof (*tp));
1355 if (!dev) {
1356 printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
1357 return -ENOMEM;
1358 }
1359
1360 SET_MODULE_OWNER(dev);
1361 SET_NETDEV_DEV(dev, &pdev->dev);
1362 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1363 printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
1364 "aborting\n", pci_name(pdev),
1365 pci_resource_len (pdev, 0),
1366 pci_resource_start (pdev, 0));
1367 goto err_out_free_netdev;
1368 }
1369
1370 /* grab all resources from both PIO and MMIO regions, as we
1371 * don't want anyone else messing around with our hardware */
1372 if (pci_request_regions (pdev, "tulip"))
1373 goto err_out_free_netdev;
1374
1375#ifndef USE_IO_OPS
1376 ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
1377#else
1378 ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
1379#endif
1380 if (!ioaddr)
1381 goto err_out_free_res;
1382
1383 pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
1384
1385 /*
1386 * initialize private data structure 'tp'
1387 * it is zeroed and aligned in alloc_etherdev
1388 */
1389 tp = netdev_priv(dev);
1390
1391 tp->rx_ring = pci_alloc_consistent(pdev,
1392 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1393 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1394 &tp->rx_ring_dma);
1395 if (!tp->rx_ring)
1396 goto err_out_mtable;
1397 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1398 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1399
1400 tp->chip_id = chip_idx;
1401 tp->flags = tulip_tbl[chip_idx].flags;
1402 tp->pdev = pdev;
1403 tp->base_addr = ioaddr;
1404 tp->revision = chip_rev;
1405 tp->csr0 = csr0;
1406 spin_lock_init(&tp->lock);
1407 spin_lock_init(&tp->mii_lock);
1408 init_timer(&tp->timer);
1409 tp->timer.data = (unsigned long)dev;
1410 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1411
1412 dev->base_addr = (unsigned long)ioaddr;
1413
1414#ifdef CONFIG_TULIP_MWI
1415 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1416 tulip_mwi_config (pdev, dev);
1417#else
1418 /* MWI is broken for DC21143 rev 65... */
1419 if (chip_idx == DC21143 && chip_rev == 65)
1420 tp->csr0 &= ~MWI;
1421#endif
1422
1423 /* Stop the chip's Tx and Rx processes. */
1424 tulip_stop_rxtx(tp);
1425
1426 pci_set_master(pdev);
1427
1428#ifdef CONFIG_GSC
1429 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1430 switch (pdev->subsystem_device) {
1431 default:
1432 break;
1433 case 0x1061:
1434 case 0x1062:
1435 case 0x1063:
1436 case 0x1098:
1437 case 0x1099:
1438 case 0x10EE:
1439 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1440 chip_name = "GSC DS21140 Tulip";
1441 }
1442 }
1443#endif
1444
1445 /* Clear the missed-packet counter. */
1446 ioread32(ioaddr + CSR8);
1447
1448 /* The station address ROM is read byte serially. The register must
1449 be polled, waiting for the value to be read bit serially from the
1450 EEPROM.
1451 */
1452 ee_data = tp->eeprom;
1453 sum = 0;
1454 if (chip_idx == LC82C168) {
1455 for (i = 0; i < 3; i++) {
1456 int value, boguscnt = 100000;
1457 iowrite32(0x600 | i, ioaddr + 0x98);
1458 do
1459 value = ioread32(ioaddr + CSR9);
1460 while (value < 0 && --boguscnt > 0);
1461 put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
1462 sum += value & 0xffff;
1463 }
1464 } else if (chip_idx == COMET) {
1465 /* No need to read the EEPROM. */
1466 put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
1467 put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4));
1468 for (i = 0; i < 6; i ++)
1469 sum += dev->dev_addr[i];
1470 } else {
1471 /* A serial EEPROM interface, we read now and sort it out later. */
1472 int sa_offset = 0;
1473 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1474
1475 for (i = 0; i < sizeof(tp->eeprom); i+=2) {
1476 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1477 ee_data[i] = data & 0xff;
1478 ee_data[i + 1] = data >> 8;
1479 }
1480
1481 /* DEC now has a specification (see Notes) but early board makers
1482 just put the address in the first EEPROM locations. */
1483 /* This does memcmp(ee_data, ee_data+16, 8) */
1484 for (i = 0; i < 8; i ++)
1485 if (ee_data[i] != ee_data[16+i])
1486 sa_offset = 20;
1487 if (chip_idx == CONEXANT) {
1488 /* Check that the tuple type and length is correct. */
1489 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1490 sa_offset = 0x19A;
1491 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1492 ee_data[2] == 0) {
1493 sa_offset = 2; /* Grrr, damn Matrox boards. */
1494 multiport_cnt = 4;
1495 }
1496#ifdef CONFIG_DDB5476
1497 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
1498 /* DDB5476 MAC address in first EEPROM locations. */
1499 sa_offset = 0;
1500 /* No media table either */
1501 tp->flags &= ~HAS_MEDIA_TABLE;
1502 }
1503#endif
1504#ifdef CONFIG_DDB5477
1505 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
1506 /* DDB5477 MAC address in first EEPROM locations. */
1507 sa_offset = 0;
1508 /* No media table either */
1509 tp->flags &= ~HAS_MEDIA_TABLE;
1510 }
1511#endif
1512#ifdef CONFIG_MIPS_COBALT
1513 if ((pdev->bus->number == 0) &&
1514 ((PCI_SLOT(pdev->devfn) == 7) ||
1515 (PCI_SLOT(pdev->devfn) == 12))) {
1516 /* Cobalt MAC address in first EEPROM locations. */
1517 sa_offset = 0;
1518 /* Ensure our media table fixup gets applied */
1519 memcpy(ee_data + 16, ee_data, 8);
1520 }
1521#endif
1522#ifdef CONFIG_GSC
1523 /* Check to see if we have a broken srom */
1524 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1525 /* pci_vendor_id and subsystem_id are swapped */
1526 ee_data[0] = ee_data[2];
1527 ee_data[1] = ee_data[3];
1528 ee_data[2] = 0x61;
1529 ee_data[3] = 0x10;
1530
1531 /* HSC-PCI boards need to be byte-swapped and shifted
1532 * up 1 word. This shift needs to happen at the end
1533 * of the MAC first because of the 2 byte overlap.
1534 */
1535 for (i = 4; i >= 0; i -= 2) {
1536 ee_data[17 + i + 3] = ee_data[17 + i];
1537 ee_data[16 + i + 5] = ee_data[16 + i];
1538 }
1539 }
1540#endif
1541
1542 for (i = 0; i < 6; i ++) {
1543 dev->dev_addr[i] = ee_data[i + sa_offset];
1544 sum += ee_data[i + sa_offset];
1545 }
1546 }
1547 /* Lite-On boards have the address byte-swapped. */
1548 if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
1549 && dev->dev_addr[1] == 0x00)
1550 for (i = 0; i < 6; i+=2) {
1551 char tmp = dev->dev_addr[i];
1552 dev->dev_addr[i] = dev->dev_addr[i+1];
1553 dev->dev_addr[i+1] = tmp;
1554 }
1555 /* On the Zynx 315 Etherarray and other multiport boards only the
1556 first Tulip has an EEPROM.
1557 On Sparc systems the mac address is held in the OBP property
1558 "local-mac-address".
1559 The addresses of the subsequent ports are derived from the first.
1560 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1561 that here as well. */
1562 if (sum == 0 || sum == 6*0xff) {
1563#if defined(__sparc__)
1564 struct pcidev_cookie *pcp = pdev->sysdata;
1565#endif
1566 eeprom_missing = 1;
1567 for (i = 0; i < 5; i++)
1568 dev->dev_addr[i] = last_phys_addr[i];
1569 dev->dev_addr[i] = last_phys_addr[i] + 1;
1570#if defined(__sparc__)
1571 if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
1572 "local-mac-address") == 6) {
1573 prom_getproperty(pcp->prom_node, "local-mac-address",
1574 dev->dev_addr, 6);
1575 }
1576#endif
1577#if defined(__i386__) /* Patch up x86 BIOS bug. */
1578 if (last_irq)
1579 irq = last_irq;
1580#endif
1581 }
1582
1583 for (i = 0; i < 6; i++)
1584 last_phys_addr[i] = dev->dev_addr[i];
1585 last_irq = irq;
1586 dev->irq = irq;
1587
1588 /* The lower four bits are the media type. */
1589 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1590 if (options[board_idx] & MEDIA_MASK)
1591 tp->default_port = options[board_idx] & MEDIA_MASK;
1592 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1593 tp->full_duplex = 1;
1594 if (mtu[board_idx] > 0)
1595 dev->mtu = mtu[board_idx];
1596 }
1597 if (dev->mem_start & MEDIA_MASK)
1598 tp->default_port = dev->mem_start & MEDIA_MASK;
1599 if (tp->default_port) {
1600 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
1601 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1602 tp->medialock = 1;
1603 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1604 tp->full_duplex = 1;
1605 }
1606 if (tp->full_duplex)
1607 tp->full_duplex_lock = 1;
1608
1609 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1610 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
1611 tp->mii_advertise = media2advert[tp->default_port - 9];
1612 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1613 }
1614
1615 if (tp->flags & HAS_MEDIA_TABLE) {
1616 sprintf(dev->name, "tulip%d", board_idx); /* hack */
1617 tulip_parse_eeprom(dev);
1618 strcpy(dev->name, "eth%d"); /* un-hack */
1619 }
1620
1621 if ((tp->flags & ALWAYS_CHECK_MII) ||
1622 (tp->mtable && tp->mtable->has_mii) ||
1623 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1624 if (tp->mtable && tp->mtable->has_mii) {
1625 for (i = 0; i < tp->mtable->leafcount; i++)
1626 if (tp->mtable->mleaf[i].media == 11) {
1627 tp->cur_index = i;
1628 tp->saved_if_port = dev->if_port;
1629 tulip_select_media(dev, 2);
1630 dev->if_port = tp->saved_if_port;
1631 break;
1632 }
1633 }
1634
1635 /* Find the connected MII xcvrs.
1636 Doing this in open() would allow detecting external xcvrs
1637 later, but takes much time. */
1638 tulip_find_mii (dev, board_idx);
1639 }
1640
1641 /* The Tulip-specific entries in the device structure. */
1642 dev->open = tulip_open;
1643 dev->hard_start_xmit = tulip_start_xmit;
1644 dev->tx_timeout = tulip_tx_timeout;
1645 dev->watchdog_timeo = TX_TIMEOUT;
1646#ifdef CONFIG_TULIP_NAPI
1647 dev->poll = tulip_poll;
1648 dev->weight = 16;
1649#endif
1650 dev->stop = tulip_close;
1651 dev->get_stats = tulip_get_stats;
1652 dev->do_ioctl = private_ioctl;
1653 dev->set_multicast_list = set_rx_mode;
1654#ifdef CONFIG_NET_POLL_CONTROLLER
1655 dev->poll_controller = &poll_tulip;
1656#endif
1657 SET_ETHTOOL_OPS(dev, &ops);
1658
1659 if (register_netdev(dev))
1660 goto err_out_free_ring;
1661
1662 printk(KERN_INFO "%s: %s rev %d at %p,",
1663 dev->name, chip_name, chip_rev, ioaddr);
1664 pci_set_drvdata(pdev, dev);
1665
1666 if (eeprom_missing)
1667 printk(" EEPROM not present,");
1668 for (i = 0; i < 6; i++)
1669 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
1670 printk(", IRQ %d.\n", irq);
1671
1672 if (tp->chip_id == PNIC2)
1673 tp->link_change = pnic2_lnk_change;
1674 else if (tp->flags & HAS_NWAY)
1675 tp->link_change = t21142_lnk_change;
1676 else if (tp->flags & HAS_PNICNWAY)
1677 tp->link_change = pnic_lnk_change;
1678
1679 /* Reset the xcvr interface and turn on heartbeat. */
1680 switch (chip_idx) {
1681 case DC21140:
1682 case DM910X:
1683 case ULI526X:
1684 default:
1685 if (tp->mtable)
1686 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1687 break;
1688 case DC21142:
1689 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1690 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1691 iowrite32(0x0000, ioaddr + CSR13);
1692 iowrite32(0x0000, ioaddr + CSR14);
1693 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1694 } else
1695 t21142_start_nway(dev);
1696 break;
1697 case PNIC2:
1698 /* just do a reset for sanity's sake */
1699 iowrite32(0x0000, ioaddr + CSR13);
1700 iowrite32(0x0000, ioaddr + CSR14);
1701 break;
1702 case LC82C168:
1703 if ( ! tp->mii_cnt) {
1704 tp->nway = 1;
1705 tp->nwayset = 0;
1706 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1707 iowrite32(0x30, ioaddr + CSR12);
1708 iowrite32(0x0001F078, ioaddr + CSR6);
1709 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1710 }
1711 break;
1712 case MX98713:
1713 case COMPEX9881:
1714 iowrite32(0x00000000, ioaddr + CSR6);
1715 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1716 iowrite32(0x00000001, ioaddr + CSR13);
1717 break;
1718 case MX98715:
1719 case MX98725:
1720 iowrite32(0x01a80000, ioaddr + CSR6);
1721 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1722 iowrite32(0x00001000, ioaddr + CSR12);
1723 break;
1724 case COMET:
1725 /* No initialization necessary. */
1726 break;
1727 }
1728
1729 /* put the chip in snooze mode until opened */
1730 tulip_set_power_state (tp, 0, 1);
1731
1732 return 0;
1733
1734err_out_free_ring:
1735 pci_free_consistent (pdev,
1736 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1737 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1738 tp->rx_ring, tp->rx_ring_dma);
1739
1740err_out_mtable:
1741 if (tp->mtable)
1742 kfree (tp->mtable);
1743 pci_iounmap(pdev, ioaddr);
1744
1745err_out_free_res:
1746 pci_release_regions (pdev);
1747
1748err_out_free_netdev:
1749 free_netdev (dev);
1750 return -ENODEV;
1751}
1752
1753
1754#ifdef CONFIG_PM
1755
1756static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1757{
1758 struct net_device *dev = pci_get_drvdata(pdev);
1759
1760 if (dev && netif_running (dev) && netif_device_present (dev)) {
1761 netif_device_detach (dev);
1762 tulip_down (dev);
1763 /* pci_power_off(pdev, -1); */
1764 }
1765 return 0;
1766}
1767
1768
1769static int tulip_resume(struct pci_dev *pdev)
1770{
1771 struct net_device *dev = pci_get_drvdata(pdev);
1772
1773 if (dev && netif_running (dev) && !netif_device_present (dev)) {
1774#if 1
1775 pci_enable_device (pdev);
1776#endif
1777 /* pci_power_on(pdev); */
1778 tulip_up (dev);
1779 netif_device_attach (dev);
1780 }
1781 return 0;
1782}
1783
1784#endif /* CONFIG_PM */
1785
1786
1787static void __devexit tulip_remove_one (struct pci_dev *pdev)
1788{
1789 struct net_device *dev = pci_get_drvdata (pdev);
1790 struct tulip_private *tp;
1791
1792 if (!dev)
1793 return;
1794
1795 tp = netdev_priv(dev);
1796 unregister_netdev(dev);
1797 pci_free_consistent (pdev,
1798 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1799 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1800 tp->rx_ring, tp->rx_ring_dma);
1801 if (tp->mtable)
1802 kfree (tp->mtable);
1803 pci_iounmap(pdev, tp->base_addr);
1804 free_netdev (dev);
1805 pci_release_regions (pdev);
1806 pci_set_drvdata (pdev, NULL);
1807
1808 /* pci_power_off (pdev, -1); */
1809}
1810
1811#ifdef CONFIG_NET_POLL_CONTROLLER
1812/*
1813 * Polling 'interrupt' - used by things like netconsole to send skbs
1814 * without having to re-enable interrupts. It's not called while
1815 * the interrupt routine is executing.
1816 */
1817
1818static void poll_tulip (struct net_device *dev)
1819{
1820 /* disable_irq here is not very nice, but with the lockless
1821 interrupt handler we have no other choice. */
1822 disable_irq(dev->irq);
1823 tulip_interrupt (dev->irq, dev, NULL);
1824 enable_irq(dev->irq);
1825}
1826#endif
1827
1828static struct pci_driver tulip_driver = {
1829 .name = DRV_NAME,
1830 .id_table = tulip_pci_tbl,
1831 .probe = tulip_init_one,
1832 .remove = __devexit_p(tulip_remove_one),
1833#ifdef CONFIG_PM
1834 .suspend = tulip_suspend,
1835 .resume = tulip_resume,
1836#endif /* CONFIG_PM */
1837};
1838
1839
1840static int __init tulip_init (void)
1841{
1842#ifdef MODULE
1843 printk (KERN_INFO "%s", version);
1844#endif
1845
1846 /* copy module parms into globals */
1847 tulip_rx_copybreak = rx_copybreak;
1848 tulip_max_interrupt_work = max_interrupt_work;
1849
1850 /* probe for and init boards */
1851 return pci_module_init (&tulip_driver);
1852}
1853
1854
1855static void __exit tulip_cleanup (void)
1856{
1857 pci_unregister_driver (&tulip_driver);
1858}
1859
1860
1861module_init(tulip_init);
1862module_exit(tulip_cleanup);