/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen .

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address. Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started. some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off... anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000 changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825: Changed to receive directly in to sk_buffs which are
   allocated at open() time. Eliminates copy on incoming frames
   (small ones are still copied). Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel. When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234. This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/parisc-device.h>

#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) { y; }


#define CHECK_WBACK(priv, addr,len) \
	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)

#define CHECK_INV(priv, addr,len) \
	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)

#define CHECK_WBACK_INV(priv, addr,len) \
	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)


#define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
#define PA_CPU_PORT_L_ACCESS	4
#define PA_CHANNEL_ATTENTION	8


/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __BIG_ENDIAN
#define WSWAPrfd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPrbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPscb(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPcmd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPtbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)	((struct i596_rfd *)(x))
#define WSWAPrbd(x)	((struct i596_rbd *)(x))
#define WSWAPiscp(x)	((struct i596_iscp *)(x))
#define WSWAPscb(x)	((struct i596_scb *)(x))
#define WSWAPcmd(x)	((struct i596_cmd *)(x))
#define WSWAPtbd(x)	((struct i596_tbd *)(x))
#define WSWAPchar(x)	((char *)(x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");
module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define MAX_DRIVERS	4	/* max count of drivers */

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	5

#define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32 ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	dma_addr_t next;
	dma_addr_t data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596. The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next. It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */
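/* Concretely (see i596_add_cmd() below): the CPU walks the queue through
 * cmd->v_next, while the chain handed to the chip is built with
 * cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp, &cmd->status)), i.e. the
 * bus-side link deliberately skips past v_next to the status word.
 */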

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	dma_addr_t b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	dma_addr_t tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	dma_addr_t b_next;	/* Address from i596 viewpoint */
	dma_addr_t rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	dma_addr_t b_next;
	dma_addr_t b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	dma_addr_t b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};

/* These values as chosen so struct i596_private fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	dma_addr_t cmd;
	dma_addr_t rfd;
	u32 crc_err;
	u32 align_err;
	u32 resource_err;
	u32 over_err;
	u32 rcvdt_err;
	u32 short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	dma_addr_t scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	dma_addr_t iscp;
};

struct i596_private {
	volatile struct i596_scp scp		__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
	u32 stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32 last_cmd;
	struct net_device_stats stats;
	int next_tx_cmd;
	int options;
	spinlock_t lock;
	dma_addr_t dma_addr;
	struct device *dev;
};

static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /* *multi IA */ };

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static struct net_device_stats *i596_get_stats(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline void CA(struct net_device *dev)
{
	gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
}


static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
{
	struct i596_private *lp = dev->priv;

	u32 v = (u32) (c) | (u32) (x);
	u16 a, b;

	if (lp->options & OPT_SWAP_PORT) {
		a = v >> 16;
		b = v & 0xffff;
	} else {
		a = v & 0xffff;
		b = v >> 16;
	}

	gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
	udelay(1);
	gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
}


static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
	while (--delcnt && lp->iscp.stat) {
		udelay(10);
		CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk("%s: %s, iscp.stat %04x, didn't clear\n",
		       dev->name, str, lp->iscp.stat);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
	while (--delcnt && lp->scb.command) {
		udelay(10);
		CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk("scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %08x, .rfd = %08x\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk(" errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk("rfd_head = %p\n", rfd);
	do {
		printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
		       rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk("rbd_head = %p\n", rbd);
	do {
		printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	CHECK_INV(lp, lp, sizeof(struct i596_private));
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static void i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

	pcc2[0x28] = 1;
	pcc2[0x2b] = 0x1d;
	printk("%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
}
#endif

#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
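/* For illustration: the whole struct i596_private is allocated with
 * dma_alloc_noncoherent() in i82596_probe(), so a CPU pointer inside it maps
 * to a device address by plain offset arithmetic, e.g.
 * virt_to_dma(lp, &lp->scb) == lp->dma_addr + offsetof(struct i596_private, scb).
 */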

static inline void init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);

		if (skb == NULL)
			panic("%s: alloc_skb() failed", __FILE__);
		skb_reserve(skb, 2);
		dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(dma_addr);
		rbd->size = PKT_BUF_SZ;
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(lp->dev,
				 (dma_addr_t)WSWAPchar(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	unsigned long flags;

	disable_irq(dev->irq);	/* disable IRQs from LAN */
	DEB(DEB_INIT,
		printk("RESET 82596 port: %lx (with IRQ %d disabled)\n",
		       (dev->base_addr + PA_I82596_RESET),
		       dev->irq));

	gsc_writel(0, (dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;


	lp->scp.sysbus = 0x0000006c;
	lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
	lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	lp->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));

	CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
	CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));

	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));

	CA(dev);

	if (wait_istat(dev, lp, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	lp->scb.command = 0;
	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));

	enable_irq(dev->irq);	/* enable IRQs from LAN */

	DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, sizeof(init_setup));
	lp->cf_cmd.cmd.command = CmdConfigure;
	CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));

	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
		goto failed;
	DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));

	return 0;

failed:
	printk("%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, 0);
	return -1;
}


static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
		}
		else {
			printk("%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(dma_addr);
				CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
			}
			else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
					dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			lp->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				lp->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				lp->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				lp->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				lp->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				lp->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				lp->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				lp->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
		CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk("frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				lp->stats.tx_errors++;
				lp->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk("i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev, lp, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->priv;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		lp->scb.command = CUC_START;
		CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk("%s: command unit timed out, status resetting.\n", dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

#if 0
/* this function makes a perfectly adequate probe... but we have a
   device list */
static int i596_test(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	volatile int *tint;
	u32 data;

	tint = (volatile int *)(&(lp->scp));
	data = virt_to_dma(lp,tint);

	tint[1] = -1;
	CHECK_WBACK(lp, tint, PAGE_SIZE);

	MPU_PORT(dev, 1, data);

	for(data = 1000000; data; data--) {
		CHECK_INV(lp, tint, PAGE_SIZE);
		if(tint[1] != -1)
			break;

	}

	printk("i596_test result %d\n", tint[1]);

}
#endif


static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
		printk("%s: IRQ %d not free\n", dev->name, dev->irq);
		goto out;
	}

	init_rx_bufs(dev);

	if (init_i596_mem(dev)) {
		printk("%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}

	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	free_irq(dev->irq, dev);
out:
	return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->priv;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
			dev->name));

	lp->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == lp->stats.tx_packets) {
		DEB(DEB_ERRORS, printk("Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk("Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
		CA (dev);
		lp->last_restart = lp->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
				skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return 0;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
				dev->name));
		lp->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
				DMA_TO_DEVICE);
		tbd->data = WSWAPchar(tx_cmd->dma_addr);

		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
		CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		lp->stats.tx_packets++;
		lp->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}

static void print_eth(unsigned char *add, char *str)
{
	int i;

	printk("i596 0x%p, ", add);
	for (i = 0; i < 6; i++)
		printk(" %02X", add[i + 6]);
	printk(" -->");
	for (i = 0; i < 6; i++)
		printk(" %02X", add[i]);
	printk(" %02X%02X, %s\n", add[12], add[13], str);
}


#define LAN_PROM_ADDR	0xF0810000

static int __devinit i82596_probe(struct net_device *dev,
				  struct device *gen_dev)
{
	int i;
	struct i596_private *lp;
	char eth_addr[6];
	dma_addr_t dma_addr;

	/* This lot ensures things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_private) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
		for (i=0; i < 6; i++) {
			eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
		}
		printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
	}

	dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
		sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
	if (!dev->mem_start) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = eth_addr[i];

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->get_stats = i596_get_stats;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = i596_poll_controller;
#endif

	dev->priv = (void *)(dev->mem_start);

	lp = dev->priv;
	memset(lp, 0, sizeof(struct i596_private));

	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);
	lp->dma_addr = dma_addr;
	lp->dev = gen_dev;

	CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));

	i = register_netdev(dev);
	if (i) {
		lp = dev->priv;
		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
				     (void *)dev->mem_start, lp->dma_addr);
		return i;
	};

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
		dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	unsigned short status, ack_cmd = 0;

	if (dev == NULL) {
		printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
		return IRQ_NONE;
	}

	lp = dev->priv;

	spin_lock (&lp->lock);

	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
		spin_unlock (&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & STAT_C))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
				} else {
					lp->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						lp->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						lp->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						lp->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						lp->stats.collisions++;
					if ((ptr->status) & 0x1000)
						lp->stats.tx_aborted_errors++;
				}
				dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk("%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk("%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk("%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set. Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
			CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
		}

		if ((lp->cmd_head != NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
		CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				lp->stats.rx_errors++;
				lp->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
	lp->scb.command = ack_cmd;
	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	   acknowledgement aside from acking the 82596 might be needed
	   here... but it's running acceptably without */

	CA(dev);

	wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp, 100, "close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));

	CA(dev);

	wait_cmd(dev, lp, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

	disable_irq(dev->irq);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

static struct net_device_stats *
 i596_get_stats(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;

	return &lp->stats;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->priv;
	int config = 0, cnt;

	DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (lp->cf_cmd.cmd.command)
			printk("%s: config change request already queued\n",
			       dev->name);
		else {
			lp->cf_cmd.cmd.command = CmdConfigure;
			CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &lp->cf_cmd.cmd);
		}
	}

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT)
	{
		cnt = MAX_MC_CNT;
		printk("%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = dev->mc_count * 6;
		cp = cmd->mc_addrs;
		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			if (i596_debug > 1)
				DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
		}
		CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "lasi_82596 debug mask");

static int num_drivers;
static struct net_device *netdevs[MAX_DRIVERS];

static int __devinit
lan_init_chip(struct parisc_device *dev)
{
	struct net_device *netdevice;
	int retval;

	if (num_drivers >= MAX_DRIVERS) {
		/* max count of possible i82596 drivers reached */
		return -ENOMEM;
	}

	if (num_drivers == 0)
		printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");

	if (!dev->irq) {
		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
			__FILE__, dev->hpa.start);
		return -ENODEV;
	}

	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
			dev->irq);

	netdevice = alloc_etherdev(0);
	if (!netdevice)
		return -ENOMEM;

	netdevice->base_addr = dev->hpa.start;
	netdevice->irq = dev->irq;

	retval = i82596_probe(netdevice, &dev->dev);
	if (retval) {
		free_netdev(netdevice);
		return -ENODEV;
	}

	if (dev->id.sversion == 0x72) {
		((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
	}

	netdevs[num_drivers++] = netdevice;

	return retval;
}


static struct parisc_device_id lan_tbl[] = {
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
	{ 0, }
};

MODULE_DEVICE_TABLE(parisc, lan_tbl);

static struct parisc_driver lan_driver = {
	.name		= "lasi_82596",
	.id_table	= lan_tbl,
	.probe		= lan_init_chip,
};

static int __devinit lasi_82596_init(void)
{
	if (debug >= 0)
		i596_debug = debug;
	return register_parisc_driver(&lan_driver);
}

module_init(lasi_82596_init);

static void __exit lasi_82596_exit(void)
{
	int i;

	for (i=0; i<MAX_DRIVERS; i++) {
		struct i596_private *lp;
		struct net_device *netdevice;

		netdevice = netdevs[i];
		if (!netdevice)
			continue;

		unregister_netdev(netdevice);

		lp = netdevice->priv;
		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
				     (void *)netdevice->mem_start, lp->dma_addr);
		free_netdev(netdevice);
	}
	num_drivers = 0;

	unregister_parisc_driver(&lan_driver);
}

module_exit(lasi_82596_exit);