/*
 *  olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
 *	       1999/2000 Mike Phillips (mikep@linuxtr.net)
 *
 *  Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
 *  chipset.
 *
 *  Base Driver Skeleton:
 *	Written 1993-94 by Donald Becker.
 *
 *	Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.
 *
 *  Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 *  assistance and perseverance with the testing of this driver.
 *
 *  This software may be used and distributed according to the terms
 *  of the GNU General Public License, incorporated herein by reference.
 *
 *  4/27/99 - Alpha Release 0.1.0
 *	      First release to the public
 *
 *  6/8/99  - Official Release 0.2.0
 *	      Merged into the kernel code
 *  8/18/99 - Updated driver for 2.3.13 kernel to use new pci
 *	      resource. Driver also reports the card name returned by
 *	      the pci resource.
 *  1/11/00 - Added spinlocks for smp
 *  2/23/00 - Updated to dev_kfree_irq
 *  3/10/00 - Fixed FDX enable which triggered other bugs also
 *	      squashed.
 *  5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
 *	      The odd thing about the changes is that the fix for
 *	      endian issues with the big-endian data in the arb, asb...
 *	      was to always swab() the bytes, no matter what CPU.
 *	      That's because the read[wl]() functions always swap the
 *	      bytes on the way in on PPC.
 *	      Fixing the hardware descriptors was another matter,
 *	      because they weren't going through read[wl](), so all
 *	      the results had to be in memory as le32 values. kdaaker
 *
 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
 *
 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
 *
 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
 *	      Change proc_fs behaviour, now one entry per adapter.
 *
 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
 *	      adapter while live does not take the system down with it.
 *
 * 06/02/01 - Clean up, copy skb for small packets
 *
 * 06/22/01 - Add EISR error handling routines
 *
 * 07/19/01 - Improve bad LAA reporting, strip out freemem
 *	      into a separate function, it's called from 3
 *	      different places now.
 * 02/09/02 - Replaced sleep_on.
 * 03/01/02 - Replace access to several registers from 32 bit to
 *	      16 bit. Fixes alignment errors on PPC 64 bit machines.
 *	      Thanks to Al Trautman for this one.
 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
 *	      silently ignored until the error checking code
 *	      went into version 1.0.0
 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
 *	      Required for strict compliance with pci power mgmt specs.
 *  To Do:
 *
 *	     Wake on lan
 *
 *  If Problems do Occur
 *  Most problems can be rectified by either closing and opening the interface
 *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
 *  if compiled into the kernel).
 */
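/* A concrete example of the recovery steps above, assuming the interface is
 * named tr0 and the driver was built as a module called "olympic":
 *
 *	ifconfig tr0 down ; ifconfig tr0 up
 *	rmmod olympic ; modprobe olympic
 */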

/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */

#define OLYMPIC_DEBUG 0


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include <net/checksum.h>

#include <asm/io.h>
#include <asm/system.h>

#include "olympic.h"

/* I've got to put some intelligence into the version number so that Peter and I know
 * which version of the code somebody has got.
 * Version Number = a.b.c.d  where a.b.c is the level of code and d is the latest author.
 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
 *
 * Official releases will only have an a.b.c version number format.
 */

static char version[] __devinitdata =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;

static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
				 "Address Verification", "Neighbor Notification (Ring Poll)",
				 "Request Parameters","FDX Registration Request",
				 "FDX Duplicate Address Check", "Station registration Query Wait",
				 "Unknown stage"};

static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
				 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
				 "Duplicate Node Address","Request Parameters","Remove Received",
				 "Reserved", "Reserved", "No Monitor Detected for RPL",
				 "Monitor Contention failure for RPL", "FDX Protocol Error"};

/* Module parameters */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mbps is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size */

static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;

/* Message Level */

static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);

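/* Example module load combining the parameters above (the values shown are
 * purely illustrative; each comma-separated entry applies to one adapter):
 *
 *	modprobe olympic ringspeed=16,0 pkt_buf_sz=4096,4096 message_level=1,1 network_monitor=1,0
 *
 * With network_monitor enabled, the ring information can then be read from
 * /proc/net/olympic_tr0, /proc/net/olympic_tr1, etc.
 */
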
static struct pci_device_id olympic_pci_tbl[] = {
	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
	{ } 	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;


static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static struct net_device_stats * olympic_get_stats(struct net_device *dev);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;

static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev ;
	struct olympic_private *olympic_priv;
	static int card_no = -1 ;
	int i ;

	card_no++ ;

	if ((i = pci_enable_device(pdev))) {
		return i ;
	}

	pci_set_master(pdev);

	if ((i = pci_request_regions(pdev,"olympic"))) {
		goto op_disable_dev;
	}

	dev = alloc_trdev(sizeof(struct olympic_private)) ;
	if (!dev) {
		i = -ENOMEM;
		goto op_release_dev;
	}

	olympic_priv = dev->priv ;

	spin_lock_init(&olympic_priv->olympic_lock) ;

	init_waitqueue_head(&olympic_priv->srb_wait);
	init_waitqueue_head(&olympic_priv->trb_wait);
#if OLYMPIC_DEBUG
	printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
#endif
	dev->irq=pdev->irq;
	dev->base_addr=pci_resource_start(pdev, 0);
	olympic_priv->olympic_card_name = pci_name(pdev);
	olympic_priv->pdev = pdev;
	olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
	olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
	if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
		i = -EIO; /* ioremap failed, make sure we return an error */
		goto op_free_iomap;
	}

	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
		olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
	else
		olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;

	dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
	olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
	olympic_priv->olympic_message_level = message_level[card_no] ;
	olympic_priv->olympic_network_monitor = network_monitor[card_no];

	if ((i = olympic_init(dev))) {
		goto op_free_iomap;
	}

	dev->open=&olympic_open;
	dev->hard_start_xmit=&olympic_xmit;
	dev->change_mtu=&olympic_change_mtu;
	dev->stop=&olympic_close;
	dev->do_ioctl=NULL;
	dev->set_multicast_list=&olympic_set_rx_mode;
	dev->get_stats=&olympic_get_stats ;
	dev->set_mac_address=&olympic_set_mac_address ;
	SET_MODULE_OWNER(dev) ;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pci_set_drvdata(pdev,dev) ;
	register_netdev(dev) ;
	printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
	if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
		char proc_name[20] ;
		strcpy(proc_name,"net/olympic_") ;
		strcat(proc_name,dev->name) ;
		create_proc_read_entry(proc_name,0,NULL,olympic_proc_info,(void *)dev) ;
		printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
	}
	return 0 ;

op_free_iomap:
	if (olympic_priv->olympic_mmio)
		iounmap(olympic_priv->olympic_mmio);
	if (olympic_priv->olympic_lap)
		iounmap(olympic_priv->olympic_lap);

	free_netdev(dev);
op_release_dev:
	pci_release_regions(pdev);

op_disable_dev:
	pci_disable_device(pdev);
	return i;
}

static int __devinit olympic_init(struct net_device *dev)
{
	struct olympic_private *olympic_priv;
	u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
	unsigned long t;
	unsigned int uaa_addr;

	olympic_priv=(struct olympic_private *)dev->priv;
	olympic_mmio=olympic_priv->olympic_mmio;

	printk("%s \n", version);
	printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);

	writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
	t=jiffies;
	while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
		schedule();
		if(time_after(jiffies, t + 40*HZ)) {
			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
			return -ENODEV;
		}
	}


	/* Needed for cardbus */
	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
		writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
	}

#if OLYMPIC_DEBUG
	printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
	printk("GPR: %x\n",readw(olympic_mmio+GPR));
	printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif
	/* Aaaahhh, you have got to be really careful setting GPR, the card
	   holds the previous values from flash memory, including autosense
	   and ring speed */

	writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);

	if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
		writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
	} else if (olympic_priv->olympic_ring_speed == 16) {
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
		writew(GPR_16MBPS, olympic_mmio+GPR);
	} else if (olympic_priv->olympic_ring_speed == 4) {
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
		writew(0, olympic_mmio+GPR);
	}

	writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);

#if OLYMPIC_DEBUG
	printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
#endif
	/* Solo has been paused to meet the Cardbus power
	 * specs if the adapter is cardbus. Check to
	 * see whether it has been paused and then restart solo. The
	 * adapter should set the pause bit within 1 second.
	 */

	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
		t=jiffies;
		while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
			schedule() ;
			if(time_after(jiffies, t + 2*HZ)) {
				printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n") ;
				return -ENODEV;
			}
		}
		writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
	}

	/* start solo init */
	writel((1<<15),olympic_mmio+SISR_MASK_SUM);

	t=jiffies;
	while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
		schedule();
		if(time_after(jiffies, t + 15*HZ)) {
			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
			return -ENODEV;
		}
	}

	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
#endif

	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
{
	int i;
	printk("init_srb(%p): ",init_srb);
	for(i=0;i<20;i++)
		printk("%x ",readb(init_srb+i));
	printk("\n");
}
#endif
	if(readw(init_srb+6)) {
		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
		return -ENODEV;
	}

	if (olympic_priv->olympic_message_level) {
		if ( readb(init_srb +2) & 0x40) {
			printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
		} else {
			printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
		}
	}

	uaa_addr=swab16(readw(init_srb+8));

#if OLYMPIC_DEBUG
	printk("UAA resides at %x\n",uaa_addr);
#endif

	writel(uaa_addr,olympic_mmio+LAPA);
	adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));

#if OLYMPIC_DEBUG
	printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
			readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
			readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
#endif

	memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);

	olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
	olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));

	return 0;

}

static int olympic_open(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
	unsigned long flags, t;
	int i, open_finished = 1 ;
	u8 resp, err;

	DECLARE_WAITQUEUE(wait,current) ;

	olympic_init(dev);

	if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
		return -EAGAIN;
	}

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif

	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */

	/* adapter is closed, so SRB is pointed to by LAPWWO */

	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
	printk("Before the open command \n");
#endif
	do {
		memset_io(init_srb,0,SRB_COMMAND_SIZE);

		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);

		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		if (olympic_priv->olympic_network_monitor)
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
		else
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

		/* Test OR of first 3 bytes as it's entirely possible for
		 * someone to set the first 2 bytes to be zero, although this
		 * is an error, the first byte must have bit 6 set to 1  */

		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
			writeb(olympic_priv->olympic_laa[0],init_srb+12);
			writeb(olympic_priv->olympic_laa[1],init_srb+13);
			writeb(olympic_priv->olympic_laa[2],init_srb+14);
			writeb(olympic_priv->olympic_laa[3],init_srb+15);
			writeb(olympic_priv->olympic_laa[4],init_srb+16);
			writeb(olympic_priv->olympic_laa[5],init_srb+17);
			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
		}
		writeb(1,init_srb+30);

		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
		olympic_priv->srb_queued=1;

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

		t = jiffies ;

		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_INTERRUPTIBLE) ;

		while(olympic_priv->srb_queued) {
			schedule() ;
			if(signal_pending(current)) {
				printk(KERN_WARNING "%s: Signal received in open.\n",
					dev->name);
				printk(KERN_WARNING "SISR=%x LISR=%x\n",
					readl(olympic_mmio+SISR),
					readl(olympic_mmio+LISR));
				olympic_priv->srb_queued=0;
				break;
			}
			if (time_after(jiffies, t + 10*HZ)) {
				printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
				olympic_priv->srb_queued=0;
				break ;
			}
			set_current_state(TASK_INTERRUPTIBLE) ;
		}
		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_RUNNING) ;
		olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
		printk("init_srb(%p): ",init_srb);
		for(i=0;i<20;i++)
			printk("%02x ",readb(init_srb+i));
		printk("\n");
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */

		switch (resp = readb(init_srb+2)) {
		case OLYMPIC_CLEAR_RET_CODE:
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
			goto out;
		case 0:
			open_finished = 1;
			break;
		case 0x07:
			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
				printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
				open_finished = 0 ;
				continue;
			}

			err = readb(init_srb+7);

			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
				printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
			} else {
				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
					open_maj_error[(err & 0xf0) >> 4],
					open_min_error[(err & 0x0f)]);
			}
			goto out;

		case 0x32:
			printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
				dev->name,
				olympic_priv->olympic_laa[0],
				olympic_priv->olympic_laa[1],
				olympic_priv->olympic_laa[2],
				olympic_priv->olympic_laa[3],
				olympic_priv->olympic_laa[4],
				olympic_priv->olympic_laa[5]) ;
			goto out;

		default:
			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
			goto out;

		}
	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */

	if (readb(init_srb+18) & (1<<3))
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);

	if (readb(init_srb+18) & (1<<1))
		olympic_priv->olympic_ring_speed = 100 ;
	else if (readb(init_srb+18) & 1)
		olympic_priv->olympic_ring_speed = 16 ;
	else
		olympic_priv->olympic_ring_speed = 4 ;

	if (olympic_priv->olympic_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);

	olympic_priv->asb = swab16(readw(init_srb+8));
	olympic_priv->srb = swab16(readw(init_srb+10));
	olympic_priv->arb = swab16(readw(init_srb+12));
	olympic_priv->trb = swab16(readw(init_srb+16));

	olympic_priv->olympic_receive_options = 0x01 ;
	olympic_priv->olympic_copy_all_options = 0 ;

	/* setup rx ring */

	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {

		struct sk_buff *skb;

		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
		if(skb == NULL)
			break;

		skb->dev = dev;

		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
			skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
		olympic_priv->rx_ring_skb[i]=skb;
	}

	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		goto out;
	}

	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
	writew(i, olympic_mmio+RXDESCQCNT);

	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

	writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );

	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif

	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif

	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);

	/* setup tx ring */

	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
		olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;

	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);

	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */

	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
	writel(0,olympic_mmio+EISR) ;
	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat ;
		u8 __iomem *opt ;
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;

		printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
		printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
	}

	netif_start_queue(dev);
	return 0;

out:
	free_irq(dev->irq, dev);
	return -EIO;
}

/*
 *  When we enter the rx routine we do not know how many frames have been
 *  queued on the rx channel. Therefore we start at the next rx status
 *  position and travel around the receive ring until we have completed
 *  all the frames.
 *
 *  This means that we may process the frame before we receive the end
 *  of frame interrupt. This is why we always test the status instead
 *  of blindly processing the next frame.
 *
 *  We also remove the last 4 bytes from the packet; these are just
 *  token ring trailer info and they upset protocols that don't check
 *  their own length, i.e. SNA.
 *
 */
static void olympic_rx(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc ;
	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;

	while (rx_status->status_buffercnt) {
		u32 l_status_buffercnt;

		olympic_priv->rx_status_last_received++ ;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
		l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		if(l_status_buffercnt & 0xC0000000) {
			if (l_status_buffercnt & 0x3B000000) {
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
					if (l_status_buffercnt & (1<<28)) /* Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
				}
				olympic_priv->rx_ring_last_received += i ;
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				olympic_priv->olympic_stats.rx_errors++;
			} else {

				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
				} else {
					skb = dev_alloc_skb(length) ;
				}

				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
					olympic_priv->olympic_stats.rx_dropped++ ;
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i ;
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				} else  {
					/* Optimise based upon number of buffers used.
					   If only one buffer is used we can simply swap the buffers around.
					   If more than one then we must use the new buffer and copy the information
					   first. Ideally all frames would be in a single buffer, this can be tuned by
					   altering the buffer size. If the length of the packet is less than
					   1500 bytes we're going to copy it over anyway to stop packets getting
					   dropped from sockets with buffers smaller than our pkt_buf_sz. */

					if (buffer_cnt==1) {
						olympic_priv->rx_ring_last_received++ ;
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
						if (length > 1500) {
							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_put(skb2,length-4);
							skb2->protocol = tr_type_trans(skb2,dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
								cpu_to_le32(olympic_priv->pkt_buf_sz);
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
							netif_rx(skb2) ;
						} else {
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb->protocol = tr_type_trans(skb,dev) ;
							netif_rx(skb) ;
						}
					} else {
						do { /* Walk the buffers */
							olympic_priv->rx_ring_last_received++ ;
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received ;
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
							memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
						} while (--i) ;
						skb_trim(skb,skb->len-4) ;
						skb->protocol = tr_type_trans(skb,dev);
						netif_rx(skb) ;
					}
					dev->last_rx = jiffies ;
					olympic_priv->olympic_stats.rx_packets++ ;
					olympic_priv->olympic_stats.rx_bytes += length ;
				} /* if skb == null */
			} /* If status & 0x3b */

		} else { /* if buffercnt & 0xC */
			olympic_priv->rx_ring_last_received += i ;
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
		}

		rx_status->fragmentcnt_framelen = 0 ;
		rx_status->status_buffercnt = 0 ;
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);

		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
	} /* while */

}

static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	int i;

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
			pci_unmap_single(olympic_priv->pdev,
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return ;
}

static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				olympic_priv->olympic_stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
			   /var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One of the interrupts we want */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	return IRQ_HANDLED;
}

static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	unsigned long flags ;

	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);

	netif_stop_queue(dev);

	if(olympic_priv->free_tx_ring_entries) {
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
		olympic_priv->free_tx_ring_entries--;

		olympic_priv->tx_ring_free++;
		olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
		netif_wake_queue(dev);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 0;
	} else {
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
		return 1;
	}

}


static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	while(olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

		if(signal_pending(current)) {
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
			olympic_priv->srb_queued=0;
			break;
		}

		if (t == 0) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
		}
		olympic_priv->srb_queued=0;
	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}

static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 options = 0;
	u8 __iomem *srb;
	struct dev_mc_list *dmi ;
	unsigned char dev_mc_address[4] ;
	int i ;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	if (dev->flags&IFF_PROMISC)
		options |= 0x61 ;
	else
		options &= ~0x61 ;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
		writeb(0,srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
		writeb(0,srb+3);
		writeb(olympic_priv->olympic_receive_options,srb+4);
		writeb(options,srb+5);

		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options ;

		return ;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
		dev_mc_address[0] |= dmi->dmi_addr[2] ;
		dev_mc_address[1] |= dmi->dmi_addr[3] ;
		dev_mc_address[2] |= dmi->dmi_addr[4] ;
		dev_mc_address[3] |= dmi->dmi_addr[5] ;
	}

	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	writeb(0,srb+3);
	writeb(0,srb+4);
	writeb(0,srb+5);
	writeb(dev_mc_address[0],srb+6);
	writeb(dev_mc_address[1],srb+7);
	writeb(dev_mc_address[2],srb+8);
	writeb(dev_mc_address[3],srb+9);

	olympic_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}

static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found \n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;

			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}

static struct net_device_stats * olympic_get_stats(struct net_device *dev)
{
	struct olympic_private *olympic_priv ;
	olympic_priv=(struct olympic_private *) dev->priv;
	return (struct net_device_stats *) &olympic_priv->olympic_stats;
}

static int olympic_set_mac_address (struct net_device *dev, void *addr)
{
	struct sockaddr *saddr = addr ;
	struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;

	if (netif_running(dev)) {
		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
		return -EIO ;
	}

	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;

	if (olympic_priv->olympic_message_level) {
		printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
			olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
			olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
			olympic_priv->olympic_laa[5]);
	}

	return 0 ;
}

1386static void olympic_arb_cmd(struct net_device *dev)
1387{
1388 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1389 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1390 u8 __iomem *arb_block, *asb_block, *srb ;
1391 u8 header_len ;
1392 u16 frame_len, buffer_len ;
1393 struct sk_buff *mac_frame ;
1394 u8 __iomem *buf_ptr ;
1395 u8 __iomem *frame_data ;
1396 u16 buff_off ;
1397 u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1398 u8 fdx_prot_error ;
1399 u16 next_ptr;
1400
1401 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1402 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1403 srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
1404
1405 if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
1406
1407 header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1408 frame_len = swab16(readw(arb_block + 10)) ;
1409
1410 buff_off = swab16(readw(arb_block + 6)) ;
1411
1412 buf_ptr = olympic_priv->olympic_lap + buff_off ;
1413
1414#if OLYMPIC_DEBUG
1415{
1416 int i;
1417 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1418
1419 for (i=0 ; i < 14 ; i++) {
1420 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1421 }
1422
1423 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1424}
1425#endif
1426 mac_frame = dev_alloc_skb(frame_len) ;
1427 if (!mac_frame) {
1428 printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1429 goto drop_frame;
1430 }
1431
1432 /* Walk the buffer chain, creating the frame */
1433
1434 do {
1435 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1436 buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1437 memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1438 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1439 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
1440
1441		mac_frame->protocol = tr_type_trans(mac_frame, dev);
1442
1443		if (olympic_priv->olympic_network_monitor) {
1444			struct trh_hdr *mac_hdr ;
1445			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
1446			mac_hdr = tr_hdr(mac_frame);
1447			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
1448			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
1449		}
1450		netif_rx(mac_frame);
1451		dev->last_rx = jiffies;
1452
1453drop_frame:
1454 /* Now tell the card we have dealt with the received frame */
1455
1456 /* Set LISR Bit 1 */
1457 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1458
1459 /* Is the ASB free ? */
1460
1461 if (readb(asb_block + 2) != 0xff) {
1462 olympic_priv->asb_queued = 1 ;
1463 writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1464 return ;
1465 /* Drop out and wait for the bottom half to be run */
1466 }
1467
1468 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1469 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1470 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1471 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1472
1473 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1474
1475 olympic_priv->asb_queued = 2 ;
1476
1477 return ;
1478
1479 } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1480 lan_status = swab16(readw(arb_block+6));
1481 fdx_prot_error = readb(arb_block+8) ;
1482
1483 /* Issue ARB Free */
1484 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
1485
1486 lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1487
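		/* lan_status_diff holds only the status bits that changed since the last Lan.change.status */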
1488 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1489 if (lan_status_diff & LSC_LWF)
1490 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1491 if (lan_status_diff & LSC_ARW)
1492 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1493 if (lan_status_diff & LSC_FPE)
1494 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1495 if (lan_status_diff & LSC_RR)
1496 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1497
1498 /* Adapter has been closed by the hardware */
1499
1500 /* reset tx/rx fifo's and busmaster logic */
1501
1502 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1503 udelay(1);
1504 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1505 netif_stop_queue(dev);
1506 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1507 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1508 } /* If serious error */
1509
1510 if (olympic_priv->olympic_message_level) {
1511 if (lan_status_diff & LSC_SIG_LOSS)
1512 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1513 if (lan_status_diff & LSC_HARD_ERR)
1514 printk(KERN_INFO "%s: Beaconing \n",dev->name);
1515 if (lan_status_diff & LSC_SOFT_ERR)
1516 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1517 if (lan_status_diff & LSC_TRAN_BCN)
1518				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1519 if (lan_status_diff & LSC_SS)
1520 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1521 if (lan_status_diff & LSC_RING_REC)
1522 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1523 if (lan_status_diff & LSC_FDX_MODE)
1524 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
1525 }
1526
1527 if (lan_status_diff & LSC_CO) {
1528
1529 if (olympic_priv->olympic_message_level)
1530 printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1531
1532 /* Issue READ.LOG command */
1533
1534 writeb(SRB_READ_LOG, srb);
1535 writeb(0,srb+1);
1536 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1537 writeb(0,srb+3);
1538 writeb(0,srb+4);
1539 writeb(0,srb+5);
1540
1541 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1542
1543 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1544
1545 }
1546
1547 if (lan_status_diff & LSC_SR_CO) {
1548
1549 if (olympic_priv->olympic_message_level)
1550 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1551
1552 /* Issue a READ.SR.COUNTERS */
1553
1554 writeb(SRB_READ_SR_COUNTERS,srb);
1555 writeb(0,srb+1);
1556 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1557 writeb(0,srb+3);
1558
1559 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1560
1561 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1562
1563 }
1564
1565 olympic_priv->olympic_lan_status = lan_status ;
1566
1567 } /* Lan.change.status */
1568 else
1569 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
1570}
1571
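/*
 * Bottom half for the ASB.  If olympic_arb_cmd() found the ASB busy it set
 * asb_queued = 1 and requested an ASB-free interrupt; on the first pass
 * here we send the deferred Receive.data response, and on the second pass
 * (asb_queued == 2) we check the return code the adapter left in the ASB.
 */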
1572static void olympic_asb_bh(struct net_device *dev)
1573{
1574 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1575 u8 __iomem *arb_block, *asb_block ;
1576
1577 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1578 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1579
1580 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1581
1582 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1583 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1584 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1585 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1586
1587 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1588 olympic_priv->asb_queued = 2 ;
1589
1590 return ;
1591 }
1592
1593 if (olympic_priv->asb_queued == 2) {
1594 switch (readb(asb_block+2)) {
1595 case 0x01:
1596 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1597 break ;
1598 case 0x26:
1599 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1600 break ;
1601 case 0xFF:
1602 /* Valid response, everything should be ok again */
1603 break ;
1604 default:
1605 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1606 break ;
1607 }
1608 }
1609 olympic_priv->asb_queued = 0 ;
1610}
1611
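/*
 * change_mtu hook: the largest usable frame depends on the ring speed,
 * 4500 bytes at 4 Mbps and 18000 bytes otherwise (16 Mbps).  pkt_buf_sz is
 * updated to match; the new size is presumably picked up when the receive
 * buffers are next allocated (outside this hunk).
 */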
1612static int olympic_change_mtu(struct net_device *dev, int mtu)
1613{
1614 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1615 u16 max_mtu ;
1616
1617 if (olympic_priv->olympic_ring_speed == 4)
1618 max_mtu = 4500 ;
1619 else
1620 max_mtu = 18000 ;
1621
1622 if (mtu > max_mtu)
1623 return -EINVAL ;
1624 if (mtu < 100)
1625 return -EINVAL ;
1626
1627 dev->mtu = mtu ;
1628 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1629
1630 return 0 ;
1631}
1632
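/*
 * read_proc handler behind /proc/net/olympic_<dev> (the entry name can be
 * seen being removed in olympic_remove_one() below).  It dumps the adapter
 * address table and the token ring parameters table straight from adapter
 * SRAM.  The registration itself lives in olympic_probe(), outside this
 * hunk; a minimal sketch, assuming the create_proc_read_entry() interface
 * of this kernel generation, would look like:
 *
 *	char proc_name[20 + IFNAMSIZ];
 *	snprintf(proc_name, sizeof(proc_name), "net/olympic_%s", dev->name);
 *	create_proc_read_entry(proc_name, 0, NULL, olympic_proc_info, (void *)dev);
 */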
1633static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1634{
1635 struct net_device *dev = (struct net_device *)data ;
1636 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1637 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1638 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1639 int size = 0 ;
1640 int len=0;
1641 off_t begin=0;
1642 off_t pos=0;
1643
1644 size = sprintf(buffer,
1645 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1646 size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
1647 dev->name);
1648
1649 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
1650 dev->name,
1651 dev->dev_addr[0],
1652 dev->dev_addr[1],
1653 dev->dev_addr[2],
1654 dev->dev_addr[3],
1655 dev->dev_addr[4],
1656 dev->dev_addr[5],
1657 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
1658 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
1659 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
1660 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
1661 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
1662 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
1663 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1664 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1665 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1666 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1667
1668 size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1669
1670 size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
1671 dev->name) ;
1672
1673 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n",
1674 dev->name,
1675 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1676 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1677 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1678 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1679 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
1680 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
1681 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
1682 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
1683 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
1684 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
1685 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
1686 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
1687 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
1688 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
1689 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
1690 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
1691 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1692 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1693 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1694
1695 size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1696 dev->name) ;
1697
1698 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1699 dev->name,
1700 readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
1701 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
1702 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
1703 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
1704 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
1705 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
1706 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1707 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1708 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1709 swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1710 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1711 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1712
1713 size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1714 dev->name) ;
1715
1716 size += sprintf(buffer+size, "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n",
1717 dev->name,
1718 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1719 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1720 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
1721 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
1722 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
1723 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
1724 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
1725 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
1726 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1727 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1728 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1729 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
1730
1731 len=size;
1732 pos=begin+size;
1733 if (pos<offset) {
1734 len=0;
1735 begin=pos;
1736 }
1737 *start=buffer+(offset-begin); /* Start of wanted data */
1738 len-=(offset-begin); /* Start slop */
1739 if(len>length)
1740 len=length; /* Ending slop */
1741 return len;
1742}
1743
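/*
 * PCI hot-removal / module unload path: drop the per-adapter proc entry
 * (if network monitoring was enabled), unregister the net device, unmap
 * both MMIO regions, release the PCI regions and finally free the netdev.
 */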
1744static void __devexit olympic_remove_one(struct pci_dev *pdev)
1745{
1746 struct net_device *dev = pci_get_drvdata(pdev) ;
1747 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1748
1749 if (olympic_priv->olympic_network_monitor) {
1750		char proc_name[20 + IFNAMSIZ] ;
1751		/* Use snprintf: dev->name may be renamed to up to IFNAMSIZ-1 chars, which would overflow a fixed 20-byte buffer */
1752		snprintf(proc_name, sizeof(proc_name), "net/olympic_%s", dev->name) ;
1753 remove_proc_entry(proc_name,NULL);
1754 }
1755 unregister_netdev(dev) ;
1756 iounmap(olympic_priv->olympic_mmio) ;
1757 iounmap(olympic_priv->olympic_lap) ;
1758 pci_release_regions(pdev) ;
1759 pci_set_drvdata(pdev,NULL) ;
1760 free_netdev(dev) ;
1761}
1762
1763static struct pci_driver olympic_driver = {
1764 .name = "olympic",
1765 .id_table = olympic_pci_tbl,
1766 .probe = olympic_probe,
1767 .remove = __devexit_p(olympic_remove_one),
1768};
1769
1770static int __init olympic_pci_init(void)
1771{
1772	return pci_register_driver(&olympic_driver) ;
1773}
1774
1775static void __exit olympic_pci_cleanup(void)
1776{
1777 pci_unregister_driver(&olympic_driver) ;
1778}
1779
1780
1781module_init(olympic_pci_init) ;
1782module_exit(olympic_pci_cleanup) ;
1783
1784MODULE_LICENSE("GPL");