blob: b72b89d53ec8347061de4b4feb215bc633ed2edb [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
2 *
3 * (c) Copyright 1998 Red Hat Software Inc
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004 * Written by Alan Cox.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
8 *
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
11 *
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
14 *
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
17 *
18 */
19
20#define DRV_NAME "3c527"
21#define DRV_VERSION "0.7-SMP"
22#define DRV_RELDATE "2003/09/21"
23
24static const char *version =
25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
26
27/**
28 * DOC: Traps for the unwary
29 *
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
32 *
Jeff Garzik6aa20a22006-09-13 13:24:59 -040033 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 *
37 * Setting the SAV BP bit does not save bad packets, but
Jeff Garzik6aa20a22006-09-13 13:24:59 -040038 * only enables RX on-card stats collection.
Linus Torvalds1da177e2005-04-16 15:20:36 -070039 *
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
43 *
44 * DOC: Theory Of Operation
45 *
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
52 *
53 * The mailboxes can be used for controlling how the card traverses
 54 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
62 *
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
 65 * directly to the higher networking layers without incurring a copy,
66 * in what amounts to a time/space trade-off.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040067 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070068 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
Jeff Garzik6aa20a22006-09-13 13:24:59 -040073 * implementation was made necessary --- see mc32_update_stats().
Linus Torvalds1da177e2005-04-16 15:20:36 -070074 *
75 * DOC: Notes
Jeff Garzik6aa20a22006-09-13 13:24:59 -040076 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
79 *
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83**/
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
91#include <linux/init.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/mca-legacy.h>
97#include <linux/ioport.h>
98#include <linux/in.h>
99#include <linux/skbuff.h>
100#include <linux/slab.h>
101#include <linux/string.h>
102#include <linux/wait.h>
103#include <linux/ethtool.h>
104#include <linux/completion.h>
105#include <linux/bitops.h>
106
107#include <asm/semaphore.h>
108#include <asm/uaccess.h>
109#include <asm/system.h>
110#include <asm/io.h>
111#include <asm/dma.h>
112
113#include "3c527.h"
114
115MODULE_LICENSE("GPL");
116
117/*
118 * The name of the card. Is used for messages and in the requests for
119 * io regions, irqs and dma channels
120 */
121static const char* cardname = DRV_NAME;
122
123/* use 0 for production, 1 for verification, >2 for debug */
124#ifndef NET_DEBUG
125#define NET_DEBUG 2
126#endif
127
128#undef DEBUG_IRQ
129
130static unsigned int mc32_debug = NET_DEBUG;
131
132/* The number of low I/O ports used by the ethercard. */
133#define MC32_IO_EXTENT 8
134
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400135/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136#define TX_RING_LEN 32 /* Typically the card supports 37 */
137#define RX_RING_LEN 8 /* " " " */
138
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400139/* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141#define RX_COPYBREAK 200 /* Value from 3c59x.c */
142
143/* Issue the 82586 workaround command - this is for "busy lans", but
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400144 * basically means for all lans nowadays - has a performance (latency)
145 * cost, but best set. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146static const int WORKAROUND_82586=1;
147
/* Pointers to buffers and their on-card records.
 * One entry per slot of the host tx/rx rings. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* descriptor in card shared memory (hence volatile) */
	struct sk_buff *skb;		/* host buffer the descriptor's data pointer targets */
};
154
/* Information that needs to be kept for each board. */
struct mc32_local
{
	int slot;			/* MCA slot the adapter occupies */

	u32 base;			/* card-relative base used with isa_bus_to_virt() */
	struct net_device_stats net_stats;	/* software-side statistics */
	volatile struct mc32_mailbox *rx_box;	/* receive command mailbox (card memory) */
	volatile struct mc32_mailbox *tx_box;	/* transmit command mailbox (card memory) */
	volatile struct mc32_mailbox *exec_box;	/* execute command mailbox (card memory) */
	volatile struct mc32_stats *stats;	/* Start of on-card statistics */
	u16 tx_chain;		/* Transmit list start offset */
	u16 rx_chain;		/* Receive list start offset */
	u16 tx_len;		/* Transmit list count */
	u16 rx_len;		/* Receive list count */

	u16 xceiver_desired_state;	/* HALTED or RUNNING */
	u16 cmd_nonblocking;	/* Thread is uninterested in command result */
	u16 mc_reload_wait;	/* A multicast load request is pending */
	u32 mc_list_valid;	/* True when the mclist is set */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	atomic_t tx_count;	/* buffers left */
	atomic_t tx_ring_head;	/* index to tx en-queue end */
	u16 tx_ring_tail;	/* index to tx de-queue end */

	u16 rx_ring_tail;	/* index to rx de-queue end */

	struct semaphore cmd_mutex;	/* Serialises issuing of execute commands */
	struct completion execution_cmd; /* Card has completed an execute command */
	struct completion xceiver_cmd;	 /* Card has completed a tx or rx command */
};
189
190/* The station (ethernet) address prefix, used for a sanity check. */
191#define SA_ADDR0 0x02
192#define SA_ADDR1 0x60
193#define SA_ADDR2 0xAC
194
/* MCA adapter POS id / human-readable name pair, used when
 * scanning the bus in mc32_probe(). */
struct mca_adapters_t {
	unsigned int	id;	/* POS adapter id to probe for */
	char		*name;	/* name registered with the MCA layer */
};

/* Supported boards; terminated by a NULL-name sentinel entry. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
205
206
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400207/* Macros for ring index manipulations */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
209static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
210
211static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
212
213
214/* Index to functions, as function prototypes. */
215static int mc32_probe1(struct net_device *dev, int ioaddr);
216static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
217static int mc32_open(struct net_device *dev);
218static void mc32_timeout(struct net_device *dev);
219static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100220static irqreturn_t mc32_interrupt(int irq, void *dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221static int mc32_close(struct net_device *dev);
222static struct net_device_stats *mc32_get_stats(struct net_device *dev);
223static void mc32_set_multicast_list(struct net_device *dev);
224static void mc32_reset_multicast_list(struct net_device *dev);
Jeff Garzik7282d492006-09-13 14:30:00 -0400225static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
/* Undo everything mc32_probe()/mc32_probe1() claimed for this device:
 * give the MCA slot back, drop its name, and release the IRQ and
 * I/O region. Called on register_netdev() failure and at unload. */
static void cleanup_card(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	unsigned slot = lp->slot;
	mca_mark_as_unused(slot);
	mca_set_adapter_name(slot, NULL);
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, MC32_IO_EXTENT);
}
236
/**
 * mc32_probe - Search for supported boards
 * @unit: interface number to use
 *
 * Because MCA bus is a real bus and we can scan for cards we could do a
 * single scan for all boards here. Right now we use the passed in device
 * structure and scan for only one board. This needs fixing for modules
 * in particular.
 *
 * Returns the registered net_device on success, or an ERR_PTR()
 * encoded errno on failure.
 */

struct net_device *__init mc32_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
	static int current_mca_slot = -1;
	int i;
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	/* Do not check any supplied i/o locations.
	   POS registers usually don't fail :) */

	/* MCA cards have POS registers.
	   Autodetecting MCA cards is extremely simple.
	   Just search for the card. */

	for(i = 0; (mc32_adapters[i].name != NULL); i++) {
		current_mca_slot =
			mca_find_unused_adapter(mc32_adapters[i].id, 0);

		if(current_mca_slot != MCA_NOTFOUND) {
			/* mc32_probe1() returns 0 on success */
			if(!mc32_probe1(dev, current_mca_slot))
			{
				/* Claim the slot before registering */
				mca_set_adapter_name(current_mca_slot,
						mc32_adapters[i].name);
				mca_mark_as_used(current_mca_slot);
				err = register_netdev(dev);
				if (err) {
					cleanup_card(dev);
					free_netdev(dev);
					dev = ERR_PTR(err);
				}
				return dev;
			}

		}
	}
	free_netdev(dev);
	return ERR_PTR(-ENODEV);
}
291
292/**
293 * mc32_probe1 - Check a given slot for a board and test the card
294 * @dev: Device structure to fill in
295 * @slot: The MCA bus slot being used by this card
296 *
297 * Decode the slot data and configure the card structures. Having done this we
298 * can reset the card and configure it. The card does a full self test cycle
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400299 * in firmware so we have to wait for it to return and post us either a
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 * failure case or some addresses we use to find the board internals.
301 */
302
303static int __init mc32_probe1(struct net_device *dev, int slot)
304{
305 static unsigned version_printed;
306 int i, err;
307 u8 POS;
308 u32 base;
309 struct mc32_local *lp = netdev_priv(dev);
310 static u16 mca_io_bases[]={
311 0x7280,0x7290,
312 0x7680,0x7690,
313 0x7A80,0x7A90,
314 0x7E80,0x7E90
315 };
316 static u32 mca_mem_bases[]={
317 0x00C0000,
318 0x00C4000,
319 0x00C8000,
320 0x00CC000,
321 0x00D0000,
322 0x00D4000,
323 0x00D8000,
324 0x00DC000
325 };
326 static char *failures[]={
327 "Processor instruction",
328 "Processor data bus",
329 "Processor data bus",
330 "Processor data bus",
331 "Adapter bus",
332 "ROM checksum",
333 "Base RAM",
334 "Extended RAM",
335 "82586 internal loopback",
336 "82586 initialisation failure",
337 "Adapter list configuration error"
338 };
Joe Perches0795af52007-10-03 17:59:30 -0700339 DECLARE_MAC_BUF(mac);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 /* Time to play MCA games */
342
343 if (mc32_debug && version_printed++ == 0)
344 printk(KERN_DEBUG "%s", version);
345
346 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
347
348 POS = mca_read_stored_pos(slot, 2);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400349
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 if(!(POS&1))
351 {
352 printk(" disabled.\n");
353 return -ENODEV;
354 }
355
356 /* Fill in the 'dev' fields. */
357 dev->base_addr = mca_io_bases[(POS>>1)&7];
358 dev->mem_start = mca_mem_bases[(POS>>4)&7];
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400359
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 POS = mca_read_stored_pos(slot, 4);
361 if(!(POS&1))
362 {
363 printk("memory window disabled.\n");
364 return -ENODEV;
365 }
366
367 POS = mca_read_stored_pos(slot, 5);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400368
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369 i=(POS>>4)&3;
370 if(i==3)
371 {
372 printk("invalid memory window.\n");
373 return -ENODEV;
374 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400375
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 i*=16384;
377 i+=16384;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400378
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 dev->mem_end=dev->mem_start + i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400380
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 dev->irq = ((POS>>2)&3)+9;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
384 {
385 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
386 return -EBUSY;
387 }
388
389 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
390 dev->base_addr, dev->irq, dev->mem_start, i/1024);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400391
392
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 /* We ought to set the cache line size here.. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400394
395
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 /*
397 * Go PROM browsing
398 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400399
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 /* Retrieve and print the ethernet address. */
401 for (i = 0; i < 6; i++)
402 {
403 mca_write_pos(slot, 6, i+12);
404 mca_write_pos(slot, 7, 0);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400405
Joe Perches0795af52007-10-03 17:59:30 -0700406 dev->dev_addr[i] = mca_read_pos(slot,3);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 }
408
Joe Perches0795af52007-10-03 17:59:30 -0700409 printk("%s: Address %s", dev->name, print_mac(mac, dev->dev_addr));
410
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 mca_write_pos(slot, 6, 0);
412 mca_write_pos(slot, 7, 0);
413
414 POS = mca_read_stored_pos(slot, 4);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400415
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 if(POS&2)
417 printk(" : BNC port selected.\n");
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400418 else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 printk(" : AUI port selected.\n");
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400420
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 POS=inb(dev->base_addr+HOST_CTRL);
422 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
423 POS&=~HOST_CTRL_INTE;
424 outb(POS, dev->base_addr+HOST_CTRL);
425 /* Reset adapter */
426 udelay(100);
427 /* Reset off */
428 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
429 outb(POS, dev->base_addr+HOST_CTRL);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400430
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 udelay(300);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400432
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433 /*
434 * Grab the IRQ
435 */
436
Thomas Gleixner1fb9df52006-07-01 19:29:39 -0700437 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 if (err) {
439 release_region(dev->base_addr, MC32_IO_EXTENT);
440 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
441 goto err_exit_ports;
442 }
443
444 memset(lp, 0, sizeof(struct mc32_local));
445 lp->slot = slot;
446
447 i=0;
448
449 base = inb(dev->base_addr);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400450
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451 while(base == 0xFF)
452 {
453 i++;
454 if(i == 1000)
455 {
456 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400457 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 goto err_exit_irq;
459 }
460 udelay(1000);
461 if(inb(dev->base_addr+2)&(1<<5))
462 base = inb(dev->base_addr);
463 }
464
465 if(base>0)
466 {
467 if(base < 0x0C)
468 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
469 base<0x0A?" test failure":"");
470 else
471 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400472 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 goto err_exit_irq;
474 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400475
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476 base=0;
477 for(i=0;i<4;i++)
478 {
479 int n=0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400480
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 while(!(inb(dev->base_addr+2)&(1<<5)))
482 {
483 n++;
484 udelay(50);
485 if(n>100)
486 {
487 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
488 err = -ENODEV;
489 goto err_exit_irq;
490 }
491 }
492
493 base|=(inb(dev->base_addr)<<(8*i));
494 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400495
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400497
498 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
499
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 lp->base = dev->mem_start+base;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400501
502 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400504
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
506
507 /*
508 * Descriptor chains (card relative)
509 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400510
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
512 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400513 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
515
516 init_MUTEX_LOCKED(&lp->cmd_mutex);
517 init_completion(&lp->execution_cmd);
518 init_completion(&lp->xceiver_cmd);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400519
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
521 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
522
523 dev->open = mc32_open;
524 dev->stop = mc32_close;
525 dev->hard_start_xmit = mc32_send_packet;
526 dev->get_stats = mc32_get_stats;
527 dev->set_multicast_list = mc32_set_multicast_list;
528 dev->tx_timeout = mc32_timeout;
529 dev->watchdog_timeo = HZ*5; /* Board does all the work */
530 dev->ethtool_ops = &netdev_ethtool_ops;
531
532 return 0;
533
534err_exit_irq:
535 free_irq(dev->irq, dev);
536err_exit_ports:
537 release_region(dev->base_addr, MC32_IO_EXTENT);
538 return err;
539}
540
541
542/**
543 * mc32_ready_poll - wait until we can feed it a command
544 * @dev: The device to wait for
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400545 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 * Wait until the card becomes ready to accept a command via the
547 * command register. This tells us nothing about the completion
548 * status of any pending commands and takes very little time at all.
549 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551static inline void mc32_ready_poll(struct net_device *dev)
552{
553 int ioaddr = dev->base_addr;
554 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
555}
556
557
558/**
559 * mc32_command_nowait - send a command non blocking
560 * @dev: The 3c527 to issue the command to
561 * @cmd: The command word to write to the mailbox
562 * @data: A data block if the command expects one
563 * @len: Length of the data block
564 *
565 * Send a command from interrupt state. If there is a command
566 * currently being executed then we return an error of -1. It
567 * simply isn't viable to wait around as commands may be
568 * slow. This can theoretically be starved on SMP, but it's hard
569 * to see a realistic situation. We do not wait for the command
570 * to complete --- we rely on the interrupt handler to tidy up
571 * after us.
572 */
573
574static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
575{
576 struct mc32_local *lp = netdev_priv(dev);
577 int ioaddr = dev->base_addr;
578 int ret = -1;
579
580 if (down_trylock(&lp->cmd_mutex) == 0)
581 {
582 lp->cmd_nonblocking=1;
583 lp->exec_box->mbox=0;
584 lp->exec_box->mbox=cmd;
585 memcpy((void *)lp->exec_box->data, data, len);
586 barrier(); /* the memcpy forgot the volatile so be sure */
587
588 /* Send the command */
589 mc32_ready_poll(dev);
590 outb(1<<6, ioaddr+HOST_CMD);
591
592 ret = 0;
593
594 /* Interrupt handler will signal mutex on completion */
595 }
596
597 return ret;
598}
599
600
/**
 * mc32_command - send a command and sleep until completion
 * @dev: The 3c527 card to issue the command to
 * @cmd: The command word to write to the mailbox
 * @data: A data block if the command expects one
 * @len: Length of the data block
 *
 * Sends exec commands in a user context. This permits us to wait around
 * for the replies and also to wait for the command buffer to complete
 * from a previous command before we execute our command. After our
 * command completes we will attempt any pending multicast reload
 * we blocked off by hogging the exec buffer.
 *
 * You feed the card a command, you wait, it interrupts you get a
 * reply. All well and good. The complication arises because you use
 * commands for filter list changes which come in at bh level from things
 * like IPV6 group stuff.
 *
 * Returns 0 on success, -1 if the card reported command failure.
 */

static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	down(&lp->cmd_mutex);

	/*
	 *     My Turn
	 */

	lp->cmd_nonblocking=0;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	/* Kick the card; the interrupt handler completes execution_cmd */
	mc32_ready_poll(dev);
	outb(1<<6, ioaddr+HOST_CMD);

	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 of the returned mailbox word flags command failure */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
	 */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
659
660
/**
 * mc32_start_transceiver - tell board to restart tx/rx
 * @dev: The 3c527 card to issue the command to
 *
 * This may be called from the interrupt state, where it is used
 * to restart the rx ring if the card runs out of rx buffers.
 *
 * We must first check if it's ok to (re)start the transceiver. See
 * mc32_close for details.
 */

static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	/* Restart TX via its own mailbox */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
693
694
/**
 * mc32_halt_transceiver - tell board to stop tx/rx
 * @dev: The 3c527 card to issue the command to
 *
 * We issue the commands to halt the card's transceiver. In fact,
 * after some experimenting we now simply tell the card to
 * suspend. When issuing aborts occasionally odd things happened.
 *
 * We then sleep until the card has notified us that both rx and
 * tx have been suspended.
 */

static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend RX; the interrupt handler completes xceiver_cmd */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Then suspend TX the same way */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}
722
723
724/**
725 * mc32_load_rx_ring - load the ring of receive buffers
726 * @dev: 3c527 to build the ring for
727 *
728 * This initalises the on-card and driver datastructures to
729 * the point where mc32_start_transceiver() can be called.
730 *
731 * The card sets up the receive ring for us. We are required to use the
732 * ring it provides, although the size of the ring is configurable.
733 *
734 * We allocate an sk_buff for each ring entry in turn and
735 * initalise its house-keeping info. At the same time, we read
736 * each 'next' pointer in our rx_ring array. This reduces slow
737 * shared-memory reads and makes it easy to access predecessor
738 * descriptors.
739 *
740 * We then set the end-of-list bit for the last entry so that the
741 * card will know when it has run out of buffers.
742 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400743
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744static int mc32_load_rx_ring(struct net_device *dev)
745{
746 struct mc32_local *lp = netdev_priv(dev);
747 int i;
748 u16 rx_base;
749 volatile struct skb_header *p;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400750
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751 rx_base=lp->rx_chain;
752
753 for(i=0; i<RX_RING_LEN; i++) {
754 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
755 if (lp->rx_ring[i].skb==NULL) {
756 for (;i>=0;i--)
757 kfree_skb(lp->rx_ring[i].skb);
758 return -ENOBUFS;
759 }
760 skb_reserve(lp->rx_ring[i].skb, 18);
761
762 p=isa_bus_to_virt(lp->base+rx_base);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400763
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 p->control=0;
765 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
766 p->status=0;
767 p->length=1532;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400768
769 lp->rx_ring[i].p=p;
770 rx_base=p->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 }
772
773 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
774
775 lp->rx_ring_tail=0;
776
777 return 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400778}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
780
/**
 * mc32_flush_rx_ring - free the ring of receive buffers
 * @dev: The 3c527 whose host rx ring should be flushed
 *
 * Free the buffer for each ring slot. This may be called
 * before mc32_load_rx_ring(), eg. on error in mc32_open().
 * Requires rx skb pointers to point to a valid skb, or NULL.
 */

static void mc32_flush_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int i;

	for(i=0; i < RX_RING_LEN; i++)
	{
		if (lp->rx_ring[i].skb) {
			dev_kfree_skb(lp->rx_ring[i].skb);
			lp->rx_ring[i].skb = NULL;
		}
		/* Descriptor memory belongs to the card; just drop our pointer */
		lp->rx_ring[i].p=NULL;
	}
}
804
805
806/**
807 * mc32_load_tx_ring - load transmit ring
808 * @dev: The 3c527 card to issue the command to
809 *
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400810 * This sets up the host transmit data-structures.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 *
812 * First, we obtain from the card it's current postion in the tx
813 * ring, so that we will know where to begin transmitting
814 * packets.
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400815 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 * Then, we read the 'next' pointers from the on-card tx ring into
817 * our tx_ring array to reduce slow shared-mem reads. Finally, we
 * initialise the tx housekeeping variables.
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400819 *
820 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821
static void mc32_load_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;	/* descriptors live in card shared memory */
	int i;
	u16 tx_base;

	/* The card reports its current position in the on-card tx ring;
	   we must start walking (and later transmitting) from there. */
	tx_base=lp->tx_box->data[0];

	/* Cache each descriptor's host-virtual address so the hot paths
	   avoid repeated slow shared-memory 'next' reads. */
	for(i=0 ; i<TX_RING_LEN ; i++)
	{
		p=isa_bus_to_virt(lp->base+tx_base);
		lp->tx_ring[i].p=p;
		lp->tx_ring[i].skb=NULL;

		tx_base=p->next;	/* follow the card's descriptor chain */
	}

	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
	/* see mc32_tx_ring */

	atomic_set(&lp->tx_count, TX_RING_LEN-1);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail=0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847
848
849/**
850 * mc32_flush_tx_ring - free transmit ring
851 * @lp: Local data of 3c527 to flush the tx ring of
852 *
 * If the ring is non-empty, zip over it, freeing any
854 * allocated skb_buffs. The tx ring house-keeping variables are
855 * then reset. Requires rx skb pointers to point to a valid skb,
856 * or NULL.
857 */
858
859static void mc32_flush_tx_ring(struct net_device *dev)
860{
861 struct mc32_local *lp = netdev_priv(dev);
862 int i;
863
864 for (i=0; i < TX_RING_LEN; i++)
865 {
866 if (lp->tx_ring[i].skb)
867 {
868 dev_kfree_skb(lp->tx_ring[i].skb);
869 lp->tx_ring[i].skb = NULL;
870 }
871 }
872
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400873 atomic_set(&lp->tx_count, 0);
874 atomic_set(&lp->tx_ring_head, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 lp->tx_ring_tail=0;
876}
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400877
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
879/**
880 * mc32_open - handle 'up' of card
881 * @dev: device to open
882 *
883 * The user is trying to bring the card into ready state. This requires
884 * a brief dialogue with the card. Firstly we enable interrupts and then
885 * 'indications'. Without these enabled the card doesn't bother telling
886 * us what it has done. This had me puzzled for a week.
887 *
888 * We configure the number of card descriptors, then load the network
889 * address and multicast filters. Turn on the workaround mode. This
890 * works around a bug in the 82586 - it asks the firmware to do
891 * so. It has a performance (latency) hit but is needed on busy
892 * [read most] lans. We load the ring with buffers then we kick it
893 * all off.
894 */
895
static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command (opcode 4)
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 *	(opcode 8: configure lists, {tx count, rx count})
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		printk("%s: %s rejected our buffer configuration!\n",
		       dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration (opcode 6) */
	mc32_command(dev, 6, NULL, 0);

	lp->tx_chain = lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain = lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len = lp->exec_box->data[9];     /* Transmit list count */
	lp->rx_len = lp->exec_box->data[11];    /* Receive list count */

	/* Set Network Address (opcode 1, 6-byte MAC) */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	/* Rx ring load allocates skbs and can fail; bail out cleanly. */
	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
979
980
981/**
982 * mc32_timeout - handle a timeout from the network layer
983 * @dev: 3c527 that timed out
984 *
985 * Handle a timeout on transmit from the 3c527. This normally means
986 * bad things as the hardware handles cable timeouts and mess for
987 * us.
988 *
989 */
990
static void mc32_timeout(struct net_device *dev)
{
	/* The hardware normally handles cable timeouts itself, so log
	   the oddity and simply let the queue run again. */
	printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}
997
998
999/**
1000 * mc32_send_packet - queue a frame for transmit
1001 * @skb: buffer to transmit
1002 * @dev: 3c527 to send it out of
1003 *
1004 * Transmit a buffer. This normally means throwing the buffer onto
1005 * the transmit queue as the queue is quite large. If the queue is
1006 * full then we set tx_busy and return. Once the interrupt handler
1007 * gets messages telling it to reclaim transmit queue entries, we will
1008 * clear tx_busy and the kernel will start calling this again.
1009 *
1010 * We do not disable interrupts or acquire any locks; this can
1011 * run concurrently with mc32_tx_ring(), and the function itself
1012 * is serialised at a higher layer. However, similarly for the
1013 * card itself, we must ensure that we update tx_ring_head only
1014 * after we've established a valid packet on the tx ring (and
1015 * before we let the card "see" it, to prevent it racing with the
1016 * irq handler).
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001017 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 */
1019
static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: tell the stack to retry later (non-zero return). */
	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	/* skb_padto frees the skb on failure, so just resume the queue. */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data = isa_virt_to_bus(skb->data);
	np->status = 0;
	np->control = CONTROL_EOP | CONTROL_EOL;
	/* Descriptor must be fully written before the card can see it. */
	wmb();

	/*
	 *	The new frame has been setup; we can now
	 *	let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the previous descriptor hands the new one
	   to the card; this must happen last. */
	p->control &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return 0;
}
1068
1069
1070/**
1071 * mc32_update_stats - pull off the on board statistics
1072 * @dev: 3c527 to service
1073 *
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001074 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 * Query and reset the on-card stats. There's the small possibility
1076 * of a race here, which would result in an underestimation of
1077 * actual errors. As such, we'd prefer to keep all our stats
1078 * collection in software. As a rule, we do. However it can't be
1079 * used for rx errors and collisions as, by default, the card discards
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001080 * bad rx packets.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 *
1082 * Setting the SAV BP in the rx filter command supposedly
1083 * stops this behaviour. However, testing shows that it only seems to
1084 * enable the collation of on-card rx statistics --- the driver
1085 * never sees an RX descriptor with an error status set.
1086 *
1087 */
1088
static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;	/* on-card counters */

	u32 rx_errors=0;

	/* Each pair below folds one on-card counter into its cumulative
	   software counter, adds the updated cumulative total into
	   rx_errors, then zeroes the on-card counter. */
	rx_errors+=lp->net_stats.rx_crc_errors +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=lp->net_stats.rx_fifo_errors +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	/* rx_errors is the sum of all cumulative category totals. */
	lp->net_stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	lp->net_stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	lp->net_stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116
1117
1118/**
1119 * mc32_rx_ring - process the receive ring
1120 * @dev: 3c527 that needs its receive ring processing
1121 *
1122 *
1123 * We have received one or more indications from the card that a
1124 * receive has completed. The buffer ring thus contains dirty
1125 * entries. We walk the ring by iterating over the circular rx_ring
1126 * array, starting at the next dirty buffer (which happens to be the
1127 * one we finished up at last time around).
1128 *
1129 * For each completed packet, we will either copy it and pass it up
1130 * the stack or, if the packet is near MTU sized, we allocate
1131 * another buffer and flip the old one up the stack.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001132 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 * We must succeed in keeping a buffer on the ring. If necessary we
1134 * will toss a received packet rather than lose a ring entry. Once
1135 * the first uncompleted descriptor is found, we move the
1136 * End-Of-List bit to include the buffers just processed.
1137 *
1138 */
1139
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;	/* bounds the walk so we cannot spin forever */

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled skb up the stack and put a
				   fresh buffer into the ring slot. */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or alloc failed): copy out and
				   keep the existing ring buffer in place. */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					lp->net_stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->last_rx = jiffies;
			lp->net_stats.rx_packets++;
			lp->net_stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Rearm the descriptor for the card whether or not we
		   managed to pass the frame up. */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1216
1217
1218/**
1219 * mc32_tx_ring - process completed transmits
1220 * @dev: 3c527 that needs its transmit ring processing
1221 *
1222 *
1223 * This operates in a similar fashion to mc32_rx_ring. We iterate
1224 * over the transmit ring. For each descriptor which has been
1225 * processed by the card, we free its associated buffer and note
1226 * any errors. This continues until the transmit ring is emptied
1227 * or we reach a descriptor that hasn't yet been processed by the
1228 * card.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001229 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 */
1231
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001232static void mc32_tx_ring(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
1234 struct mc32_local *lp = netdev_priv(dev);
1235 volatile struct skb_header *np;
1236
1237 /*
1238 * We rely on head==tail to mean 'queue empty'.
1239 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
1240 * tx_ring_head wrapping to tail and confusing a 'queue empty'
1241 * condition with 'queue full'
1242 */
1243
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001244 while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
1245 {
1246 u16 t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001248 t=next_tx(lp->tx_ring_tail);
1249 np=lp->tx_ring[t].p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001251 if(!(np->status & (1<<7)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001253 /* Not COMPLETED */
1254 break;
1255 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 lp->net_stats.tx_packets++;
1257 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1258 {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001259 lp->net_stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 switch(np->status&0x0F)
1262 {
1263 case 1:
1264 lp->net_stats.tx_aborted_errors++;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001265 break; /* Max collisions */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 case 2:
1267 lp->net_stats.tx_fifo_errors++;
1268 break;
1269 case 3:
1270 lp->net_stats.tx_carrier_errors++;
1271 break;
1272 case 4:
1273 lp->net_stats.tx_window_errors++;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001274 break; /* CTS Lost */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 case 5:
1276 lp->net_stats.tx_aborted_errors++;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001277 break; /* Transmit timeout */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 }
1279 }
1280 /* Packets are sent in order - this is
1281 basically a FIFO queue of buffers matching
1282 the card ring */
1283 lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
1284 dev_kfree_skb_irq(lp->tx_ring[t].skb);
1285 lp->tx_ring[t].skb=NULL;
1286 atomic_inc(&lp->tx_count);
1287 netif_wake_queue(dev);
1288
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001289 lp->tx_ring_tail=t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 }
1291
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001292}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
1294
1295/**
1296 * mc32_interrupt - handle an interrupt from a 3c527
1297 * @irq: Interrupt number
1298 * @dev_id: 3c527 that requires servicing
1299 * @regs: Registers (unused)
1300 *
1301 *
1302 * An interrupt is raised whenever the 3c527 writes to the command
1303 * register. This register contains the message it wishes to send us
1304 * packed into a single byte field. We keep reading status entries
1305 * until we have processed all the control items, but simply count
1306 * transmit and receive reports. When all reports are in we empty the
1307 * transceiver rings as appropriate. This saves the overhead of
1308 * multiple command requests.
1309 *
1310 * Because MCA is level-triggered, we shouldn't miss indications.
1311 * Therefore, we needn't ask the card to suspend interrupts within
1312 * this handler. The card receives an implicit acknowledgment of the
1313 * current interrupt when we read the command register.
1314 *
1315 */
1316
David Howells7d12e782006-10-05 14:55:46 +01001317static irqreturn_t mc32_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318{
1319 struct net_device *dev = dev_id;
1320 struct mc32_local *lp;
1321 int ioaddr, status, boguscount = 0;
1322 int rx_event = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001323 int tx_event = 0;
1324
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 ioaddr = dev->base_addr;
1326 lp = netdev_priv(dev);
1327
1328 /* See whats cooking */
1329
1330 while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
1331 {
1332 status=inb(ioaddr+HOST_CMD);
1333
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001334#ifdef DEBUG_IRQ
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 printk("Status TX%d RX%d EX%d OV%d BC%d\n",
1336 (status&7), (status>>3)&7, (status>>6)&1,
1337 (status>>7)&1, boguscount);
1338#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001339
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 switch(status&7)
1341 {
1342 case 0:
1343 break;
1344 case 6: /* TX fail */
1345 case 2: /* TX ok */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001346 tx_event = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 break;
1348 case 3: /* Halt */
1349 case 4: /* Abort */
1350 complete(&lp->xceiver_cmd);
1351 break;
1352 default:
1353 printk("%s: strange tx ack %d\n", dev->name, status&7);
1354 }
1355 status>>=3;
1356 switch(status&7)
1357 {
1358 case 0:
1359 break;
1360 case 2: /* RX */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001361 rx_event=1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 break;
1363 case 3: /* Halt */
1364 case 4: /* Abort */
1365 complete(&lp->xceiver_cmd);
1366 break;
1367 case 6:
1368 /* Out of RX buffers stat */
1369 /* Must restart rx */
1370 lp->net_stats.rx_dropped++;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001371 mc32_rx_ring(dev);
1372 mc32_start_transceiver(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 break;
1374 default:
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001375 printk("%s: strange rx ack %d\n",
1376 dev->name, status&7);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 }
1378 status>>=3;
1379 if(status&1)
1380 {
1381 /*
1382 * No thread is waiting: we need to tidy
1383 * up ourself.
1384 */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 if (lp->cmd_nonblocking) {
1387 up(&lp->cmd_mutex);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001388 if (lp->mc_reload_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 mc32_reset_multicast_list(dev);
1390 }
1391 else complete(&lp->execution_cmd);
1392 }
1393 if(status&2)
1394 {
1395 /*
1396 * We get interrupted once per
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001397 * counter that is about to overflow.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 */
1399
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001400 mc32_update_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 }
1402 }
1403
1404
1405 /*
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001406 * Process the transmit and receive rings
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 */
1408
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001409 if(tx_event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 mc32_tx_ring(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001411
1412 if(rx_event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 mc32_rx_ring(dev);
1414
1415 return IRQ_HANDLED;
1416}
1417
1418
1419/**
1420 * mc32_close - user configuring the 3c527 down
1421 * @dev: 3c527 card to shut down
1422 *
1423 * The 3c527 is a bus mastering device. We must be careful how we
1424 * shut it down. It may also be running shared interrupt so we have
1425 * to be sure to silence it properly
1426 *
1427 * We indicate that the card is closing to the rest of the
1428 * driver. Otherwise, it is possible that the card may run out
1429 * of receive buffers and restart the transceiver while we're
1430 * trying to close it.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001431 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 * We abort any receive and transmits going on and then wait until
1433 * any pending exec commands have completed in other code threads.
1434 * In theory we can't get here while that is true, in practice I am
1435 * paranoid
1436 *
1437 * We turn off the interrupt enable for the board to be sure it can't
 * interfere with other devices.
1439 */
1440
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Stop the IRQ path from restarting the transceiver while we
	   are tearing it down. */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	/* Mask the board's interrupt enable so a shared IRQ line is
	   not disturbed after close. */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Collect the final counter values before the card goes idle. */
	mc32_update_stats(dev);

	return 0;
}
1479
1480
1481/**
1482 * mc32_get_stats - hand back stats to network layer
1483 * @dev: The 3c527 card to handle
1484 *
1485 * We've collected all the stats we can in software already. Now
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001486 * it's time to update those kept on-card and return the lot.
1487 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 */
1489
1490static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1491{
1492 struct mc32_local *lp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001493
1494 mc32_update_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 return &lp->net_stats;
1496}
1497
1498
1499/**
1500 * do_mc32_set_multicast_list - attempt to update multicasts
1501 * @dev: 3c527 device to load the list on
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001502 * @retry: indicates this is not the first call.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 *
1504 *
1505 * Actually set or clear the multicast filter for this adaptor. The
1506 * locking issues are handled by this routine. We have to track
1507 * state as it may take multiple calls to get the command sequence
1508 * completed. We just keep trying to schedule the loads until we
1509 * manage to process them all.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 *
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001511 * num_addrs == -1 Promiscuous mode, receive all packets
1512 *
1513 * num_addrs == 0 Normal mode, clear multicast list
1514 *
1515 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1516 * and do best-effort filtering.
1517 *
1518 * See mc32_update_stats() regards setting the SAV BP bit.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 *
1520 */
1521
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	if (dev->flags&IFF_PROMISC)
		/* Enable promiscuous mode */
		filt |= 1;
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
	{
		/* Too many addresses for the on-card filter: fall back
		   to promiscuous mode. */
		dev->flags|=IFF_PROMISC;
		filt |= 1;
	}
	else if(dev->mc_count)
	{
		unsigned char block[62];	/* 2-byte header + 10 x 6-byte addresses max */
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;

		int i;

		/* A fresh (non-retry) call invalidates any list we were
		   still trying to load. */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;

			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			/* Command busy: remember to reload when the IRQ
			   handler tells us the card is free again. */
			if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	/* Finally push the filter mode itself (opcode 0). */
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
1574
1575
1576/**
1577 * mc32_set_multicast_list - queue multicast list update
1578 * @dev: The 3c527 to use
1579 *
1580 * Commence loading the multicast list. This is called when the kernel
1581 * changes the lists. It will override any pending list we are trying to
1582 * load.
1583 */
1584
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* First attempt (retry == 0): overrides any pending load. */
	do_mc32_set_multicast_list(dev,0);
}
1589
1590
1591/**
1592 * mc32_reset_multicast_list - reset multicast list
1593 * @dev: The 3c527 to use
1594 *
1595 * Attempt the next step in loading the multicast lists. If this attempt
1596 * fails to complete then it will be scheduled and this function called
1597 * again later from elsewhere.
1598 */
1599
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* Retry path (retry == 1): continue a previously-stalled load. */
	do_mc32_set_multicast_list(dev,1);
}
1604
/* ethtool: report driver name, version and MCA bus location.
   NOTE(review): the strcpy/sprintf targets are fixed-size ethtool_drvinfo
   fields; the sources here are short compile-time constants so they fit,
   but bounded copies would be more defensive — confirm before changing. */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
}
1612
/* ethtool: return the driver-wide debug message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}
1617
/* ethtool: set the driver-wide debug message level. */
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}
1622
Jeff Garzik7282d492006-09-13 14:30:00 -04001623static const struct ethtool_ops netdev_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 .get_drvinfo = netdev_get_drvinfo,
1625 .get_msglevel = netdev_get_msglevel,
1626 .set_msglevel = netdev_set_msglevel,
1627};
1628
1629#ifdef MODULE
1630
1631static struct net_device *this_device;
1632
1633/**
1634 * init_module - entry point
1635 *
1636 * Probe and locate a 3c527 card. This really should probe and locate
1637 * all the 3c527 cards in the machine not just one of them. Yes you can
1638 * insmod multiple modules for now but it's a hack.
1639 */
1640
Randy Dunlap96e672c2006-06-10 13:33:48 -07001641int __init init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642{
1643 this_device = mc32_probe(-1);
1644 if (IS_ERR(this_device))
1645 return PTR_ERR(this_device);
1646 return 0;
1647}
1648
1649/**
1650 * cleanup_module - free resources for an unload
1651 *
1652 * Unloading time. We release the MCA bus resources and the interrupt
1653 * at which point everything is ready to unload. The card must be stopped
1654 * at this point or we would not have been called. When we unload we
1655 * leave the card stopped but not totally shut down. When the card is
1656 * initialized it must be rebooted or the rings reloaded before any
1657 * transmit operations are allowed to start scribbling into memory.
1658 */
1659
Al Viroafc8eb42006-06-14 18:50:53 -04001660void __exit cleanup_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661{
1662 unregister_netdev(this_device);
1663 cleanup_card(this_device);
1664 free_netdev(this_device);
1665}
1666
1667#endif /* MODULE */