/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
/*
	Written 2002-2004 by David Dillow <dave@thedillows.org>
	Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
	Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This software is available on a public web site. It may enable
	cryptographic capabilities of the 3Com hardware, and may be
	exported from the United States under License Exception "TSU"
	pursuant to 15 C.F.R. Section 740.13(e).

	This work was funded by the National Library of Medicine under
	the Department of Energy project number 0274DD06D1 and NLM project
	number Y1-LM-2015-01.

	This driver is designed for the 3Com 3CR990 Family of cards with the
	3XP Processor. It has been tested on x86 and sparc64.

	KNOWN ISSUES:
	*) The current firmware always strips the VLAN tag off, even if
		we tell it not to. You should filter VLANs at the switch
		as a workaround (good practice in any event) until we can
		get this fixed.
	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
		issue. Hopefully 3Com will fix it.
	*) Waiting for a command response takes 8ms due to non-preemptable
		polling. Only significant for getting stats and creating
		SAs, but an ugly wart never the less.

	TODO:
	*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
	*) Add more support for ethtool (especially for NIC stats)
	*) Allow disabling of RX checksum offloading
	*) Fix MAC changing to work while the interface is up
		(Need to put commands on the TX ring, which changes
		the locking)
	*) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
		http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
*/
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Packets smaller than this are copied into a new skb and the original
 * buffer handed back to the NIC.  Setting to > 1518 effectively
 * disables this feature.  Module parameter (see module_param below).
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Module parameter (see module_param below).
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 * Above this, typhoon_set_rx_mode() falls back to accepting all multicast.
 */
static const int multicast_filter_limit = 32;
65
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes, used when wrapping byte-offset ring indexes. */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)

/* Size of each receive buffer handed to the NIC. */
#define PKT_BUF_SZ		1536

/* Driver identification and printk prefixes. */
#define DRV_MODULE_NAME		"typhoon"
#define DRV_MODULE_VERSION	"1.5.7"
#define DRV_MODULE_RELDATE	"05/01/07"
#define PFX			DRV_MODULE_NAME ": "
#define ERR_PFX			KERN_ERR PFX
107
108#include <linux/module.h>
109#include <linux/kernel.h>
110#include <linux/string.h>
111#include <linux/timer.h>
112#include <linux/errno.h>
113#include <linux/ioport.h>
114#include <linux/slab.h>
115#include <linux/interrupt.h>
116#include <linux/pci.h>
117#include <linux/netdevice.h>
118#include <linux/etherdevice.h>
119#include <linux/skbuff.h>
120#include <linux/init.h>
121#include <linux/delay.h>
122#include <linux/ethtool.h>
123#include <linux/if_vlan.h>
124#include <linux/crc32.h>
125#include <linux/bitops.h>
126#include <asm/processor.h>
127#include <asm/io.h>
128#include <asm/uaccess.h>
129#include <linux/in6.h>
130#include <asm/checksum.h>
131#include <linux/version.h>
132#include <linux/dma-mapping.h>
133
134#include "typhoon.h"
135#include "typhoon-firmware.h"
136
/* Banner printed when the driver is loaded. */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
               "the buffer given back to the NIC. Default "
               "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
               "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The TSO path only supports 32 scatter/gather entries; if the kernel
 * can hand us more fragments than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: a worst-case packet must fit in the Lo Tx ring. */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
160
/* Per-model marketing name and capability flags, indexed by
 * enum typhoon_cards below.
 */
struct typhoon_card_info {
	char *name;
	int capabilities;
};

/* Capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Index into typhoon_card_info[]; also the driver_data in the PCI table. */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
179
/* directly indexed by enum typhoon_cards, above -- keep the two in sync */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
209
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 *
 * driver_data (the last field) selects the typhoon_card_info[] entry.
 */
static struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
246
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));

/* Host-side bookkeeping for one Rx buffer handed to the NIC. */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
269
/* Per-adapter private state.
 * Fields are grouped by access pattern: the Tx hot path, the Irq/Rx
 * hot path, and the slower "general" section, each starting on its own
 * cache line (____cacheline_aligned).
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;	/* set while a command waits for a response */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;		/* protected by state_lock */
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	const char *		name;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	u16			xcvr_select;
	u16			wol_events;
	u32			offload;	/* TYPHOON_OFFLOAD_* bits pushed to the card */

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};

/* How typhoon_reset() callers want to wait for completion:
 * not at all, busy-wait (atomic context), or sleepable polling.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
322
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * The read of the heartbeat register is only done for MMIO (use_mmio set).
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 * The *_NOSLEEP/WAIT values count TYPHOON_UDELAY-microsecond poll steps.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* Pre-2.5.28 kernels took no argument to synchronize_irq(). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
#define typhoon_synchronize_irq(x) synchronize_irq()
#else
#define typhoon_synchronize_irq(x) synchronize_irq(x)
#endif

/* Fold TSO support down to no-ops when the kernel lacks NETIF_F_TSO. */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
352
353static inline void
354typhoon_inc_index(u32 *index, const int count, const int num_entries)
355{
356 /* Increment a ring index -- we can use this for all rings execept
357 * the Rx rings, as they use different size descriptors
358 * otherwise, everything is the same size as a cmd_desc
359 */
360 *index += count * sizeof(struct cmd_desc);
361 *index %= num_entries * sizeof(struct cmd_desc);
362}
363
/* Advance a command-ring byte offset by @count descriptors. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
369
/* Advance a response-ring byte offset by @count descriptors. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
375
/* Advance an Rx-free-buffer-ring byte offset by @count descriptors. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
381
/* Advance a Tx-ring byte offset by @count descriptors. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
388
389static inline void
390typhoon_inc_rx_index(u32 *index, const int count)
391{
392 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
393 *index += count * sizeof(struct rx_desc);
394 *index %= RX_ENTRIES * sizeof(struct rx_desc);
395}
396
/* Soft-reset the 3XP and (optionally) wait for it to come back.
 *
 * @ioaddr: mapped register base.
 * @wait_type: NoWait -- trigger the reset and return immediately;
 *	WaitNoSleep -- busy-wait with udelay() (atomic-safe);
 *	WaitSleep -- poll via schedule_timeout() (process context only).
 *
 * Returns 0 once the card reports TYPHOON_STATUS_WAITING_FOR_HOST,
 * -ETIMEDOUT otherwise.  Interrupts are masked and acked both before
 * and after the reset.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before resetting */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* assert the reset, make sure it posts, then deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(1);
			} else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
452
453static int
454typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
455{
456 int i, err = 0;
457
458 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
459 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
460 goto out;
461 udelay(TYPHOON_UDELAY);
462 }
463
464 err = -ETIMEDOUT;
465
466out:
467 return err;
468}
469
470static inline void
471typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
472{
473 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
474 netif_carrier_off(dev);
475 else
476 netif_carrier_on(dev);
477}
478
/* Answer a "hello" keepalive request from the 3XP by posting a
 * TYPHOON_CMD_HELLO_RESP command.
 *
 * We only get a hello request if we've not sent anything to the
 * card in a long while. If the lock is held, then we're in the
 * process of issuing a command, so we don't need to respond.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* make the descriptor visible before ringing the doorbell */
		smp_wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
499
/* Drain the response ring.
 *
 * If @resp_save is non-NULL, the first sequenced response (seqNo set)
 * is copied into it -- up to @resp_size descriptors, handling ring
 * wrap -- and the caller is told (via the return value) that its
 * response arrived.  Unsolicited responses (media status, hello) are
 * handled inline; anything else is logged and dropped.
 *
 * Returns nonzero when the awaited response was captured, or when no
 * capture was requested (resp_save == NULL on entry).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
			struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* numDesc counts the descriptors following this one */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* too big for the caller's buffer -- flag
				 * an error and skip past it */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* NULL marks the response as delivered */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	indexes->respCleared = cpu_to_le32(cleared);
	wmb();	/* publish respCleared before returning */
	return (resp_save == NULL);
}
558
559static inline int
560typhoon_num_free(int lastWrite, int lastRead, int ringSize)
561{
562 /* this works for all descriptors but rx_desc, as they are a
563 * different size than the cmd_desc -- everyone else is the same
564 */
565 lastWrite /= sizeof(struct cmd_desc);
566 lastRead /= sizeof(struct cmd_desc);
567 return (ringSize + lastRead - lastWrite - 1) % ringSize;
568}
569
/* Free command descriptors between our write index and the card's
 * cleared index.
 */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
578
/* Free response descriptors between the card's ready index and our
 * cleared index.
 */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
587
/* Free Tx descriptors in @ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
594
595static int
596typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
597 int num_resp, struct resp_desc *resp)
598{
599 struct typhoon_indexes *indexes = tp->indexes;
600 struct basic_ring *ring = &tp->cmdRing;
601 struct resp_desc local_resp;
602 int i, err = 0;
603 int got_resp;
604 int freeCmd, freeResp;
605 int len, wrap_len;
606
607 spin_lock(&tp->command_lock);
608
609 freeCmd = typhoon_num_free_cmd(tp);
610 freeResp = typhoon_num_free_resp(tp);
611
612 if(freeCmd < num_cmd || freeResp < num_resp) {
613 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
614 "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
615 freeResp, num_resp);
616 err = -ENOMEM;
617 goto out;
618 }
619
620 if(cmd->flags & TYPHOON_CMD_RESPOND) {
621 /* If we're expecting a response, but the caller hasn't given
622 * us a place to put it, we'll provide one.
623 */
624 tp->awaiting_resp = 1;
625 if(resp == NULL) {
626 resp = &local_resp;
627 num_resp = 1;
628 }
629 }
630
631 wrap_len = 0;
632 len = num_cmd * sizeof(*cmd);
633 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
634 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
635 len = COMMAND_RING_SIZE - ring->lastWrite;
636 }
637
638 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
639 if(unlikely(wrap_len)) {
640 struct cmd_desc *wrap_ptr = cmd;
641 wrap_ptr += len / sizeof(*cmd);
642 memcpy(ring->ringBase, wrap_ptr, wrap_len);
643 }
644
645 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
646
647 /* "I feel a presence... another warrior is on the the mesa."
648 */
649 wmb();
650 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
651 typhoon_post_pci_writes(tp->ioaddr);
652
653 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
654 goto out;
655
656 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
657 * preempt or do anything other than take interrupts. So, don't
658 * wait for a response unless you have to.
659 *
660 * I've thought about trying to sleep here, but we're called
661 * from many contexts that don't allow that. Also, given the way
662 * 3Com has implemented irq coalescing, we would likely timeout --
663 * this has been observed in real life!
664 *
665 * The big killer is we have to wait to get stats from the card,
666 * though we could go to a periodic refresh of those if we don't
667 * mind them getting somewhat stale. The rest of the waiting
668 * commands occur during open/close/suspend/resume, so they aren't
669 * time critical. Creating SAs in the future will also have to
670 * wait here.
671 */
672 got_resp = 0;
673 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
674 if(indexes->respCleared != indexes->respReady)
675 got_resp = typhoon_process_response(tp, num_resp,
676 resp);
677 udelay(TYPHOON_UDELAY);
678 }
679
680 if(!got_resp) {
681 err = -ETIMEDOUT;
682 goto out;
683 }
684
685 /* Collect the error response even if we don't care about the
686 * rest of the response
687 */
688 if(resp->flags & TYPHOON_RESP_ERROR)
689 err = -EIO;
690
691out:
692 if(tp->awaiting_resp) {
693 tp->awaiting_resp = 0;
694 smp_wmb();
695
696 /* Ugh. If a response was added to the ring between
697 * the call to typhoon_process_response() and the clearing
698 * of tp->awaiting_resp, we could have missed the interrupt
699 * and it could hang in the ring an indeterminate amount of
700 * time. So, check for it, and interrupt ourselves if this
701 * is the case.
702 */
703 if(indexes->respCleared != indexes->respReady)
704 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
705 }
706
707 spin_unlock(&tp->command_lock);
708 return err;
709}
710
711static void
712typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
713{
714 struct typhoon *tp = netdev_priv(dev);
715 struct cmd_desc xp_cmd;
716 int err;
717
718 spin_lock_bh(&tp->state_lock);
719 if(!tp->vlgrp != !grp) {
720 /* We've either been turned on for the first time, or we've
721 * been turned off. Update the 3XP.
722 */
723 if(grp)
724 tp->offload |= TYPHOON_OFFLOAD_VLAN;
725 else
726 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
727
728 /* If the interface is up, the runtime is running -- and we
729 * must be up for the vlan core to call us.
730 *
731 * Do the command outside of the spin lock, as it is slow.
732 */
733 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
734 TYPHOON_CMD_SET_OFFLOAD_TASKS);
735 xp_cmd.parm2 = tp->offload;
736 xp_cmd.parm3 = tp->offload;
737 spin_unlock_bh(&tp->state_lock);
738 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
739 if(err < 0)
740 printk("%s: vlan offload error %d\n", tp->name, -err);
741 spin_lock_bh(&tp->state_lock);
742 }
743
744 /* now make the change visible */
745 tp->vlgrp = grp;
746 spin_unlock_bh(&tp->state_lock);
747}
748
749static void
750typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
751{
752 struct typhoon *tp = netdev_priv(dev);
753 spin_lock_bh(&tp->state_lock);
754 if(tp->vlgrp)
755 tp->vlgrp->vlan_devices[vid] = NULL;
756 spin_unlock_bh(&tp->state_lock);
757}
758
/* Write a TSO (TCP segmentation) option descriptor for @skb into the
 * Tx ring.
 *
 * @ring_dma: bus address of the start of @txRing; used to compute the
 *	bus address of this descriptor's own bytesTx field, which is
 *	stored in respAddrLo (presumably where the NIC writes status
 *	back -- confirm against the 3XP descriptor documentation).
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	/* compute the descriptor's bus address before advancing the index */
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
779
/* hard_start_xmit handler: queue @skb on the Lo Tx ring.
 *
 * Builds one Tx packet descriptor, an optional TSO option descriptor,
 * and one fragment descriptor per DMA segment, then kicks the 3XP by
 * writing the new ring index.  Always returns 0 (the packet is always
 * accepted); if a worst-case next packet would not fit, the queue is
 * stopped before returning.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTIRES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if(skb_tso_size(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* stash the skb pointer in the (otherwise unused) address field
	 * of the header descriptor -- presumably recovered at Tx-complete
	 * time; confirm in typhoon_tx_complete()
	 */
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if(skb_tso_size(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: a single fragment descriptor suffices */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear head, then one descriptor per page frag */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
930
/* Program the 3XP's Rx filter from dev->flags and the multicast list:
 * promiscuous, accept-all-multicast (when over multicast_filter_limit
 * or IFF_ALLMULTI), or a 64-bit multicast hash built with ether_crc().
 * Directed and broadcast frames are always accepted.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	u16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if((dev->mc_count > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if(dev->mc_count) {
		struct dev_mc_list *mclist;
		int i;

		/* build a 64-bit hash filter from the low 6 CRC bits of
		 * each multicast address and upload it to the card
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		    i++, mclist = mclist->next) {
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
973
974static int
975typhoon_do_get_stats(struct typhoon *tp)
976{
977 struct net_device_stats *stats = &tp->stats;
978 struct net_device_stats *saved = &tp->stats_saved;
979 struct cmd_desc xp_cmd;
980 struct resp_desc xp_resp[7];
981 struct stats_resp *s = (struct stats_resp *) xp_resp;
982 int err;
983
984 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
985 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
986 if(err < 0)
987 return err;
988
989 /* 3Com's Linux driver uses txMultipleCollisions as it's
990 * collisions value, but there is some other collision info as well...
991 *
992 * The extra status reported would be a good candidate for
993 * ethtool_ops->get_{strings,stats}()
994 */
995 stats->tx_packets = le32_to_cpu(s->txPackets);
996 stats->tx_bytes = le32_to_cpu(s->txBytes);
997 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
998 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
999 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
1000 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
1001 stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
1002 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
1003 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
1004 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
1005 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
1006 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
1007 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
1008 SPEED_100 : SPEED_10;
1009 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
1010 DUPLEX_FULL : DUPLEX_HALF;
1011
1012 /* add in the saved statistics
1013 */
1014 stats->tx_packets += saved->tx_packets;
1015 stats->tx_bytes += saved->tx_bytes;
1016 stats->tx_errors += saved->tx_errors;
1017 stats->collisions += saved->collisions;
1018 stats->rx_packets += saved->rx_packets;
1019 stats->rx_bytes += saved->rx_bytes;
1020 stats->rx_fifo_errors += saved->rx_fifo_errors;
1021 stats->rx_errors += saved->rx_errors;
1022 stats->rx_crc_errors += saved->rx_crc_errors;
1023 stats->rx_length_errors += saved->rx_length_errors;
1024
1025 return 0;
1026}
1027
1028static struct net_device_stats *
1029typhoon_get_stats(struct net_device *dev)
1030{
1031 struct typhoon *tp = netdev_priv(dev);
1032 struct net_device_stats *stats = &tp->stats;
1033 struct net_device_stats *saved = &tp->stats_saved;
1034
1035 smp_rmb();
1036 if(tp->card_state == Sleeping)
1037 return saved;
1038
1039 if(typhoon_do_get_stats(tp) < 0) {
1040 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1041 return saved;
1042 }
1043
1044 return stats;
1045}
1046
1047static int
1048typhoon_set_mac_address(struct net_device *dev, void *addr)
1049{
1050 struct sockaddr *saddr = (struct sockaddr *) addr;
1051
1052 if(netif_running(dev))
1053 return -EBUSY;
1054
1055 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1056 return 0;
1057}
1058
1059static void
1060typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1061{
1062 struct typhoon *tp = netdev_priv(dev);
1063 struct pci_dev *pci_dev = tp->pdev;
1064 struct cmd_desc xp_cmd;
1065 struct resp_desc xp_resp[3];
1066
1067 smp_rmb();
1068 if(tp->card_state == Sleeping) {
1069 strcpy(info->fw_version, "Sleep image");
1070 } else {
1071 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1072 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1073 strcpy(info->fw_version, "Unknown runtime");
1074 } else {
1075 u32 sleep_ver = xp_resp[0].parm2;
1076 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1077 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1078 sleep_ver & 0xfff);
1079 }
1080 }
1081
1082 strcpy(info->driver, DRV_MODULE_NAME);
1083 strcpy(info->version, DRV_MODULE_VERSION);
1084 strcpy(info->bus_info, pci_name(pci_dev));
1085}
1086
1087static int
1088typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1089{
1090 struct typhoon *tp = netdev_priv(dev);
1091
1092 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1093 SUPPORTED_Autoneg;
1094
1095 switch (tp->xcvr_select) {
1096 case TYPHOON_XCVR_10HALF:
1097 cmd->advertising = ADVERTISED_10baseT_Half;
1098 break;
1099 case TYPHOON_XCVR_10FULL:
1100 cmd->advertising = ADVERTISED_10baseT_Full;
1101 break;
1102 case TYPHOON_XCVR_100HALF:
1103 cmd->advertising = ADVERTISED_100baseT_Half;
1104 break;
1105 case TYPHOON_XCVR_100FULL:
1106 cmd->advertising = ADVERTISED_100baseT_Full;
1107 break;
1108 case TYPHOON_XCVR_AUTONEG:
1109 cmd->advertising = ADVERTISED_10baseT_Half |
1110 ADVERTISED_10baseT_Full |
1111 ADVERTISED_100baseT_Half |
1112 ADVERTISED_100baseT_Full |
1113 ADVERTISED_Autoneg;
1114 break;
1115 }
1116
1117 if(tp->capabilities & TYPHOON_FIBER) {
1118 cmd->supported |= SUPPORTED_FIBRE;
1119 cmd->advertising |= ADVERTISED_FIBRE;
1120 cmd->port = PORT_FIBRE;
1121 } else {
1122 cmd->supported |= SUPPORTED_10baseT_Half |
1123 SUPPORTED_10baseT_Full |
1124 SUPPORTED_TP;
1125 cmd->advertising |= ADVERTISED_TP;
1126 cmd->port = PORT_TP;
1127 }
1128
1129 /* need to get stats to make these link speed/duplex valid */
1130 typhoon_do_get_stats(tp);
1131 cmd->speed = tp->speed;
1132 cmd->duplex = tp->duplex;
1133 cmd->phy_address = 0;
1134 cmd->transceiver = XCVR_INTERNAL;
1135 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1136 cmd->autoneg = AUTONEG_ENABLE;
1137 else
1138 cmd->autoneg = AUTONEG_DISABLE;
1139 cmd->maxtxpkt = 1;
1140 cmd->maxrxpkt = 1;
1141
1142 return 0;
1143}
1144
1145static int
1146typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1147{
1148 struct typhoon *tp = netdev_priv(dev);
1149 struct cmd_desc xp_cmd;
1150 int xcvr;
1151 int err;
1152
1153 err = -EINVAL;
1154 if(cmd->autoneg == AUTONEG_ENABLE) {
1155 xcvr = TYPHOON_XCVR_AUTONEG;
1156 } else {
1157 if(cmd->duplex == DUPLEX_HALF) {
1158 if(cmd->speed == SPEED_10)
1159 xcvr = TYPHOON_XCVR_10HALF;
1160 else if(cmd->speed == SPEED_100)
1161 xcvr = TYPHOON_XCVR_100HALF;
1162 else
1163 goto out;
1164 } else if(cmd->duplex == DUPLEX_FULL) {
1165 if(cmd->speed == SPEED_10)
1166 xcvr = TYPHOON_XCVR_10FULL;
1167 else if(cmd->speed == SPEED_100)
1168 xcvr = TYPHOON_XCVR_100FULL;
1169 else
1170 goto out;
1171 } else
1172 goto out;
1173 }
1174
1175 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1176 xp_cmd.parm1 = cpu_to_le16(xcvr);
1177 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1178 if(err < 0)
1179 goto out;
1180
1181 tp->xcvr_select = xcvr;
1182 if(cmd->autoneg == AUTONEG_ENABLE) {
1183 tp->speed = 0xff; /* invalid */
1184 tp->duplex = 0xff; /* invalid */
1185 } else {
1186 tp->speed = cmd->speed;
1187 tp->duplex = cmd->duplex;
1188 }
1189
1190out:
1191 return err;
1192}
1193
1194static void
1195typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1196{
1197 struct typhoon *tp = netdev_priv(dev);
1198
1199 wol->supported = WAKE_PHY | WAKE_MAGIC;
1200 wol->wolopts = 0;
1201 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1202 wol->wolopts |= WAKE_PHY;
1203 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1204 wol->wolopts |= WAKE_MAGIC;
1205 memset(&wol->sopass, 0, sizeof(wol->sopass));
1206}
1207
1208static int
1209typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1210{
1211 struct typhoon *tp = netdev_priv(dev);
1212
1213 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1214 return -EINVAL;
1215
1216 tp->wol_events = 0;
1217 if(wol->wolopts & WAKE_PHY)
1218 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1219 if(wol->wolopts & WAKE_MAGIC)
1220 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1221
1222 return 0;
1223}
1224
/* ethtool get_rx_csum: RX checksum offload is always reported enabled. */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1232
1233static void
1234typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1235{
1236 ering->rx_max_pending = RXENT_ENTRIES;
1237 ering->rx_mini_max_pending = 0;
1238 ering->rx_jumbo_max_pending = 0;
1239 ering->tx_max_pending = TXLO_ENTRIES - 1;
1240
1241 ering->rx_pending = RXENT_ENTRIES;
1242 ering->rx_mini_pending = 0;
1243 ering->rx_jumbo_pending = 0;
1244 ering->tx_pending = TXLO_ENTRIES - 1;
1245}
1246
/* ethtool entry points. Tx checksum, scatter-gather, and TSO use the
 * generic ethtool_op helpers; RX checksumming cannot be disabled (see
 * typhoon_get_rx_csum), so no set_rx_csum is provided.
 */
static struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1263
1264static int
1265typhoon_wait_interrupt(void __iomem *ioaddr)
1266{
1267 int i, err = 0;
1268
1269 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1270 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1271 TYPHOON_INTR_BOOTCMD)
1272 goto out;
1273 udelay(TYPHOON_UDELAY);
1274 }
1275
1276 err = -ETIMEDOUT;
1277
1278out:
1279 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1280 return err;
1281}
1282
1283#define shared_offset(x) offsetof(struct typhoon_shared, x)
1284
/* Initialize the host/NIC shared interface block: point every ring's DMA
 * address and size at its slot inside the single coherent "shared" area,
 * wire up the in-memory ring bookkeeping, and set the initial card state
 * and offload flags. Called once at probe time, before the 3XP boots.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* CPU-side ring base pointers into the same shared area. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	/* Each Tx ring kicks the NIC through its own doorbell register. */
	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = iface->txLoAddr;
	tp->card_state = Sleeping;
	/* Publish card_state before anyone reading it with smp_rmb(). */
	smp_wmb();

	/* Enable all checksum offloads plus TSO by default. */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1352
1353static void
1354typhoon_init_rings(struct typhoon *tp)
1355{
1356 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1357
1358 tp->txLoRing.lastWrite = 0;
1359 tp->txHiRing.lastWrite = 0;
1360 tp->rxLoRing.lastWrite = 0;
1361 tp->rxHiRing.lastWrite = 0;
1362 tp->rxBuffRing.lastWrite = 0;
1363 tp->cmdRing.lastWrite = 0;
1364 tp->cmdRing.lastWrite = 0;
1365
1366 tp->txLoRing.lastRead = 0;
1367 tp->txHiRing.lastRead = 0;
1368}
1369
/* Download the runtime firmware image to the 3XP, section by section,
 * through a bounce buffer the NIC DMAs from. Each section is fed to the
 * card in PAGE_SIZE chunks, with a handshake (boot-command interrupt plus
 * WAITING_FOR_SEGMENT status) before every chunk.
 *
 * Returns 0 on success, -EINVAL for a bad image, -ENOMEM if the bounce
 * buffer cannot be allocated, or -ETIMEDOUT if the card stops responding.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* Sanity-check the magic tag before trusting any header fields. */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Temporarily enable (and unmask) the boot-command interrupt so we
	 * can poll for it; original settings are restored on exit.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Hand the card the image entry point and the HMAC digest used to
	 * authenticate the image, then start the runtime download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			/* Wait until the card is ready for another chunk. */
			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			/* Describe the chunk (length, checksum, load
			 * address, DMA source) and tell the card to go.
			 */
			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the last chunk to be consumed before announcing that
	 * the download is complete.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the caller's interrupt enable/mask settings. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1519
/* Boot the 3XP once its image (sleep or runtime) is loaded: wait for
 * initial_status, hand the card the DMA address of the shared boot
 * record, wait for it to reach RUNNING, then clear the doorbell
 * registers and issue the final boot command.
 *
 * Returns 0 on success or -ETIMEDOUT if the card misses either status.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* No card does 64 bit DAC, so the high address word is zero. */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1555
/* Walk the Tx ring from our last read position up to the index the NIC
 * has completed (*index), freeing each completed descriptor: packet
 * descriptors carry the skb pointer to free, fragment descriptors carry
 * a DMA mapping to unmap. Returns the new lastRead offset; the caller
 * stores it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			/* NOTE(review): the skb pointer is reassembled from
			 * the addr/addrHi descriptor words -- assumes the
			 * Tx path stashed it there; verify on 64-bit hosts.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the slot free before advancing past it. */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1591
1592static void
1593typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1594 volatile u32 * index)
1595{
1596 u32 lastRead;
1597 int numDesc = MAX_SKB_FRAGS + 1;
1598
1599 /* This will need changing if we start to use the Hi Tx ring. */
1600 lastRead = typhoon_clean_tx(tp, txRing, index);
1601 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1602 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1603 netif_wake_queue(tp->dev);
1604
1605 txRing->lastRead = lastRead;
1606 smp_wmb();
1607}
1608
/* Return the still-mapped receive buffer at slot idx to the free ring so
 * the NIC can reuse it. If the free ring is full, the skb is dropped
 * instead (the slot will be refilled later by typhoon_fill_free_ring()).
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Full when advancing the write pointer would hit the NIC's
	 * cleared index.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1635
/* Allocate and DMA-map a fresh receive skb for slot idx and post it on
 * the free ring. Returns 0 on success, -ENOMEM if the free ring is full
 * or the skb allocation fails (the slot is left empty in either case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Full when advancing the write pointer would hit the NIC's
	 * cleared index.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->tail,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1682
/* Process up to 'budget' received packets from one Rx ring (Hi or Lo).
 * Small packets (< rx_copybreak) are copied into a fresh skb so the
 * original mapped buffer can be recycled; larger ones are handed up
 * directly and their slot refilled. Checksum and VLAN offload results
 * from the descriptor are propagated to the skb.
 *
 * Called from the NAPI poll path. Returns the number of packets
 * delivered; updates *cleared to tell the NIC how far we have consumed.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* virtAddr in the descriptor is our rxbuffers[] index. */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* Errored frames are just recycled, not counted here. */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copy path: sync the buffer for the CPU, copy the
			 * frame out, hand the original buffer back to the
			 * NIC. The 2-byte reserve aligns the IP header.
			 */
			new_skb->dev = tp->dev;
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand-up path: unmap and deliver this skb, then
			 * allocate a replacement for the slot.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only trust the checksum if both the IP and the TCP or
		 * UDP checks passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock protects vlgrp against concurrent changes. */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1764
1765static void
1766typhoon_fill_free_ring(struct typhoon *tp)
1767{
1768 u32 i;
1769
1770 for(i = 0; i < RXENT_ENTRIES; i++) {
1771 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1772 if(rxb->skb)
1773 continue;
1774 if(typhoon_alloc_rx_skb(tp, i) < 0)
1775 break;
1776 }
1777}
1778
/* NAPI poll handler: drain command responses, reap completed transmits,
 * receive from the Hi then the Lo Rx ring within the budget, refill the
 * free-buffer ring if it ran dry, and re-enable interrupts when all work
 * is done. Returns 0 when done (removed from the poll list), 1 to be
 * polled again.
 */
static int
typhoon_poll(struct net_device *dev, int *total_budget)
{
	struct typhoon *tp = netdev_priv(dev);
	struct typhoon_indexes *indexes = tp->indexes;
	int orig_budget = *total_budget;
	int budget, work_done, done;

	/* Make sure we see the NIC's latest index updates. */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	if(orig_budget > dev->quota)
		orig_budget = dev->quota;

	budget = orig_budget;
	work_done = 0;
	done = 1;

	/* The Hi ring (high-priority traffic) is serviced first. */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
		budget -= work_done;
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget);
	}

	if(work_done) {
		*total_budget -= work_done;
		dev->quota -= work_done;

		/* Budget exhausted: ask to be polled again. */
		if(work_done >= orig_budget)
			done = 0;
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if(done) {
		/* All caught up: leave polling mode and unmask IRQs. */
		netif_rx_complete(dev);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return (done ? 0 : 1);
}
1834
1835static irqreturn_t
1836typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1837{
1838 struct net_device *dev = (struct net_device *) dev_instance;
1839 struct typhoon *tp = dev->priv;
1840 void __iomem *ioaddr = tp->ioaddr;
1841 u32 intr_status;
1842
1843 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1844 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1845 return IRQ_NONE;
1846
1847 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1848
1849 if(netif_rx_schedule_prep(dev)) {
1850 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1851 typhoon_post_pci_writes(ioaddr);
1852 __netif_rx_schedule(dev);
1853 } else {
1854 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1855 dev->name);
1856 }
1857 return IRQ_HANDLED;
1858}
1859
1860static void
1861typhoon_free_rx_rings(struct typhoon *tp)
1862{
1863 u32 i;
1864
1865 for(i = 0; i < RXENT_ENTRIES; i++) {
1866 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1867 if(rxb->skb) {
1868 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1869 PCI_DMA_FROMDEVICE);
1870 dev_kfree_skb(rxb->skb);
1871 rxb->skb = NULL;
1872 }
1873 }
1874}
1875
/* Put the 3XP to sleep: program the wake events, issue the sleep
 * command, wait for the card to report SLEEPING, then drop the PCI
 * device into the requested power state with wake enabled.
 *
 * Returns 0 on success, a negative command error, or -ETIMEDOUT if the
 * card never reaches the sleeping state.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
1913
/* Wake the 3XP from sleep: restore PCI state, issue the wakeup boot
 * command, and fall back to a full reset if the card does not come back
 * (or if this firmware is known to require a reset after wakeup).
 *
 * Returns 0 on success or the result of typhoon_reset().
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1934
/* Bring the 3XP up into its running state: reset the rings, download and
 * boot the runtime image, then configure it via the command ring (MTU,
 * MAC address, IRQ coalescing, transceiver, VLAN ethertype, offloads, RX
 * filter) and finally enable Tx, Rx, and interrupts.
 *
 * Returns 0 on success; on any failure the card is reset and the rings
 * are torn down and reinitialized before returning the error.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* The MAC address is passed big-endian in parm1/parm2. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* state_lock protects tp->offload while we snapshot it. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	tp->card_state = Running;
	/* Publish card_state before readers using smp_rmb() see it. */
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2028
/* Quiesce the 3XP: mask interrupts, disable Rx/Tx, drain outstanding
 * transmits, snapshot the hardware statistics, halt the runtime image,
 * and finally reset the chip so it can later be rebooted into the
 * sleep image.
 *
 * @wait_type is passed to typhoon_reset() and controls whether the
 * reset may sleep while polling (e.g. WaitSleep vs WaitNoSleep).
 *
 * Returns 0 on success, or -ETIMEDOUT if the final chip reset fails.
 * Earlier timeouts (Tx drain, halt) are logged but not fatal, since
 * the reset recovers the hardware either way.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();	/* card_state must be visible before reading stats */
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	/* The reset below recovers the card even if the halt timed out. */
	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2092
2093static void
2094typhoon_tx_timeout(struct net_device *dev)
2095{
2096 struct typhoon *tp = netdev_priv(dev);
2097
2098 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2099 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2100 dev->name);
2101 goto truely_dead;
2102 }
2103
2104 /* If we ever start using the Hi ring, it will need cleaning too */
2105 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2106 typhoon_free_rx_rings(tp);
2107
2108 if(typhoon_start_runtime(tp) < 0) {
2109 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2110 dev->name);
2111 goto truely_dead;
2112 }
2113
2114 netif_wake_queue(dev);
2115 return;
2116
2117truely_dead:
2118 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2119 typhoon_reset(tp->ioaddr, NoWait);
2120 netif_carrier_off(dev);
2121}
2122
/* net_device open hook: wake the 3XP from its sleep state, grab the
 * (shared) IRQ, and start the runtime image. On failure, unwind in
 * reverse order and try to reboot into the sleep image and put the
 * card back to sleep so a later open can retry cleanly.
 *
 * Returns 0 on success or the negative errno of the failing step.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	/* IRQ line may be shared with other devices */
	err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	err = typhoon_start_runtime(tp);
	if(err < 0)
		goto out_irq;

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* If we can't get back into the sleep image, a hard reset is
	 * the best we can do; skip the sleep transition in that case.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2164
/* net_device stop hook: stop the queue and runtime image, release the
 * IRQ, rebuild the rings for the next open, then reboot the sleep
 * image and put the card into D3hot. Failures are logged but the
 * close always "succeeds" -- there is no useful recovery here.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* drop all Rx buffers and reinitialize the rings for next open */
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2190
2191#ifdef CONFIG_PM
/* PCI resume hook: wake the card and restart the runtime image, then
 * reattach the net device. If the interface was down at suspend time
 * there is nothing to do -- it will be brought up by open().
 *
 * Returns 0 on success; on failure the chip is reset and -EBUSY is
 * returned, leaving the device detached.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2223
/* PCI suspend hook: stop the runtime image, reboot into the sleep
 * image, program the MAC address and a directed+broadcast Rx filter
 * (so wake-on-LAN events can be matched while asleep), then put the
 * card into the requested sleep state with tp->wol_events armed.
 *
 * Magic-packet wakeup is refused while a VLAN group is registered
 * (the two are incompatible -- see the error message below).
 *
 * Returns 0 on success. On any failure we resume the device and
 * return -EBUSY so the system suspend is aborted.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* state_lock protects vlgrp against concurrent VLAN register/kill */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* the sleep image needs the station address to match WOL frames */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2288
/* PCI enable_wake hook: nothing chip-specific to do here, just pass
 * the request through to the PCI core.
 */
static int
typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
	return pci_enable_wake(pdev, state, enable);
}
2294#endif
2295
2296static int __devinit
2297typhoon_test_mmio(struct pci_dev *pdev)
2298{
2299 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2300 int mode = 0;
2301 u32 val;
2302
2303 if(!ioaddr)
2304 goto out;
2305
2306 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2307 TYPHOON_STATUS_WAITING_FOR_HOST)
2308 goto out_unmap;
2309
2310 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2311 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2312 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2313
2314 /* Ok, see if we can change our interrupt status register by
2315 * sending ourselves an interrupt. If so, then MMIO works.
2316 * The 50usec delay is arbitrary -- it could probably be smaller.
2317 */
2318 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2319 if((val & TYPHOON_INTR_SELF) == 0) {
2320 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2321 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2322 udelay(50);
2323 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2324 if(val & TYPHOON_INTR_SELF)
2325 mode = 1;
2326 }
2327
2328 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2329 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2330 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2331 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2332
2333out_unmap:
2334 pci_iounmap(pdev, ioaddr);
2335
2336out:
2337 if(!mode)
2338 printk(KERN_INFO PFX "falling back to port IO\n");
2339 return mode;
2340}
2341
2342static int __devinit
2343typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2344{
2345 static int did_version = 0;
2346 struct net_device *dev;
2347 struct typhoon *tp;
2348 int card_id = (int) ent->driver_data;
2349 void __iomem *ioaddr;
2350 void *shared;
2351 dma_addr_t shared_dma;
2352 struct cmd_desc xp_cmd;
2353 struct resp_desc xp_resp[3];
2354 int i;
2355 int err = 0;
2356
2357 if(!did_version++)
2358 printk(KERN_INFO "%s", version);
2359
2360 dev = alloc_etherdev(sizeof(*tp));
2361 if(dev == NULL) {
2362 printk(ERR_PFX "%s: unable to alloc new net device\n",
2363 pci_name(pdev));
2364 err = -ENOMEM;
2365 goto error_out;
2366 }
2367 SET_MODULE_OWNER(dev);
2368 SET_NETDEV_DEV(dev, &pdev->dev);
2369
2370 err = pci_enable_device(pdev);
2371 if(err < 0) {
2372 printk(ERR_PFX "%s: unable to enable device\n",
2373 pci_name(pdev));
2374 goto error_out_dev;
2375 }
2376
2377 err = pci_set_mwi(pdev);
2378 if(err < 0) {
2379 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2380 goto error_out_disable;
2381 }
2382
2383 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2384 if(err < 0) {
2385 printk(ERR_PFX "%s: No usable DMA configuration\n",
2386 pci_name(pdev));
2387 goto error_out_mwi;
2388 }
2389
2390 /* sanity checks on IO and MMIO BARs
2391 */
2392 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2393 printk(ERR_PFX
2394 "%s: region #1 not a PCI IO resource, aborting\n",
2395 pci_name(pdev));
2396 err = -ENODEV;
2397 goto error_out_mwi;
2398 }
2399 if(pci_resource_len(pdev, 0) < 128) {
2400 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2401 pci_name(pdev));
2402 err = -ENODEV;
2403 goto error_out_mwi;
2404 }
2405 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2406 printk(ERR_PFX
2407 "%s: region #1 not a PCI MMIO resource, aborting\n",
2408 pci_name(pdev));
2409 err = -ENODEV;
2410 goto error_out_mwi;
2411 }
2412 if(pci_resource_len(pdev, 1) < 128) {
2413 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2414 pci_name(pdev));
2415 err = -ENODEV;
2416 goto error_out_mwi;
2417 }
2418
2419 err = pci_request_regions(pdev, "typhoon");
2420 if(err < 0) {
2421 printk(ERR_PFX "%s: could not request regions\n",
2422 pci_name(pdev));
2423 goto error_out_mwi;
2424 }
2425
2426 /* map our registers
2427 */
2428 if(use_mmio != 0 && use_mmio != 1)
2429 use_mmio = typhoon_test_mmio(pdev);
2430
2431 ioaddr = pci_iomap(pdev, use_mmio, 128);
2432 if (!ioaddr) {
2433 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2434 pci_name(pdev));
2435 err = -EIO;
2436 goto error_out_regions;
2437 }
2438
2439 /* allocate pci dma space for rx and tx descriptor rings
2440 */
2441 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2442 &shared_dma);
2443 if(!shared) {
2444 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2445 pci_name(pdev));
2446 err = -ENOMEM;
2447 goto error_out_remap;
2448 }
2449
2450 dev->irq = pdev->irq;
2451 tp = netdev_priv(dev);
2452 tp->shared = (struct typhoon_shared *) shared;
2453 tp->shared_dma = shared_dma;
2454 tp->pdev = pdev;
2455 tp->tx_pdev = pdev;
2456 tp->ioaddr = ioaddr;
2457 tp->tx_ioaddr = ioaddr;
2458 tp->dev = dev;
2459
2460 /* Init sequence:
2461 * 1) Reset the adapter to clear any bad juju
2462 * 2) Reload the sleep image
2463 * 3) Boot the sleep image
2464 * 4) Get the hardware address.
2465 * 5) Put the card to sleep.
2466 */
2467 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2468 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2469 err = -EIO;
2470 goto error_out_dma;
2471 }
2472
2473 /* Now that we've reset the 3XP and are sure it's not going to
2474 * write all over memory, enable bus mastering, and save our
2475 * state for resuming after a suspend.
2476 */
2477 pci_set_master(pdev);
2478 pci_save_state(pdev);
2479
2480 /* dev->name is not valid until we register, but we need to
2481 * use some common routines to initialize the card. So that those
2482 * routines print the right name, we keep our oun pointer to the name
2483 */
2484 tp->name = pci_name(pdev);
2485
2486 typhoon_init_interface(tp);
2487 typhoon_init_rings(tp);
2488
2489 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2490 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2491 pci_name(pdev));
2492 err = -EIO;
2493 goto error_out_reset;
2494 }
2495
2496 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2497 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2498 printk(ERR_PFX "%s: cannot read MAC address\n",
2499 pci_name(pdev));
2500 err = -EIO;
2501 goto error_out_reset;
2502 }
2503
2504 *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2505 *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2506
2507 if(!is_valid_ether_addr(dev->dev_addr)) {
2508 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2509 "aborting\n", pci_name(pdev));
2510 goto error_out_reset;
2511 }
2512
2513 /* Read the Sleep Image version last, so the response is valid
2514 * later when we print out the version reported.
2515 */
2516 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2517 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2518 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2519 pci_name(pdev));
2520 goto error_out_reset;
2521 }
2522
2523 tp->capabilities = typhoon_card_info[card_id].capabilities;
2524 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2525
2526 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2527 * READ_VERSIONS command. Those versions are OK after waking up
2528 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2529 * seem to need a little extra help to get started. Since we don't
2530 * know how to nudge it along, just kick it.
2531 */
2532 if(xp_resp[0].numDesc != 0)
2533 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2534
2535 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2536 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2537 pci_name(pdev));
2538 err = -EIO;
2539 goto error_out_reset;
2540 }
2541
2542 /* The chip-specific entries in the device structure. */
2543 dev->open = typhoon_open;
2544 dev->hard_start_xmit = typhoon_start_tx;
2545 dev->stop = typhoon_close;
2546 dev->set_multicast_list = typhoon_set_rx_mode;
2547 dev->tx_timeout = typhoon_tx_timeout;
2548 dev->poll = typhoon_poll;
2549 dev->weight = 16;
2550 dev->watchdog_timeo = TX_TIMEOUT;
2551 dev->get_stats = typhoon_get_stats;
2552 dev->set_mac_address = typhoon_set_mac_address;
2553 dev->vlan_rx_register = typhoon_vlan_rx_register;
2554 dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
2555 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2556
2557 /* We can handle scatter gather, up to 16 entries, and
2558 * we can do IP checksumming (only version 4, doh...)
2559 */
2560 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2561 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2562 dev->features |= NETIF_F_TSO;
2563
2564 if(register_netdev(dev) < 0)
2565 goto error_out_reset;
2566
2567 /* fixup our local name */
2568 tp->name = dev->name;
2569
2570 pci_set_drvdata(pdev, dev);
2571
2572 printk(KERN_INFO "%s: %s at %s 0x%lx, ",
2573 dev->name, typhoon_card_info[card_id].name,
2574 use_mmio ? "MMIO" : "IO", pci_resource_start(pdev, use_mmio));
2575 for(i = 0; i < 5; i++)
2576 printk("%2.2x:", dev->dev_addr[i]);
2577 printk("%2.2x\n", dev->dev_addr[i]);
2578
2579 /* xp_resp still contains the response to the READ_VERSIONS command.
2580 * For debugging, let the user know what version he has.
2581 */
2582 if(xp_resp[0].numDesc == 0) {
2583 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2584 * of version is Month/Day of build.
2585 */
2586 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2587 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2588 "%02u/%02u/2000\n", dev->name, monthday >> 8,
2589 monthday & 0xff);
2590 } else if(xp_resp[0].numDesc == 2) {
2591 /* This is the Typhoon 1.1+ type Sleep Image
2592 */
2593 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2594 u8 *ver_string = (u8 *) &xp_resp[1];
2595 ver_string[25] = 0;
2596 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2597 "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2598 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2599 ver_string);
2600 } else {
2601 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2602 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2603 le32_to_cpu(xp_resp[0].parm2));
2604 }
2605
2606 return 0;
2607
2608error_out_reset:
2609 typhoon_reset(ioaddr, NoWait);
2610
2611error_out_dma:
2612 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2613 shared, shared_dma);
2614error_out_remap:
2615 pci_iounmap(pdev, ioaddr);
2616error_out_regions:
2617 pci_release_regions(pdev);
2618error_out_mwi:
2619 pci_clear_mwi(pdev);
2620error_out_disable:
2621 pci_disable_device(pdev);
2622error_out_dev:
2623 free_netdev(dev);
2624error_out:
2625 return err;
2626}
2627
/* PCI remove hook: unregister the net device, bring the card back to
 * D0 so its registers are reachable, reset it, and release resources
 * in the reverse of probe order.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	/* card may be asleep in D3hot -- power it up before touching it */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2647
/* PCI driver glue: probe/remove plus (optional) power-management hooks. */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
	.enable_wake	= typhoon_enable_wake,
#endif
};
2659
/* Module entry point: register the PCI driver. */
static int __init
typhoon_init(void)
{
	return pci_module_init(&typhoon_driver);
}
2665
/* Module exit point: unregister the PCI driver (tears down all bound
 * devices via typhoon_remove_one).
 */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}

module_init(typhoon_init);
module_exit(typhoon_cleanup);