1/*
2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
3
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
6
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
8
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25*/
26
27
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/init.h>
31#include <linux/capability.h>
32#include <linux/sched.h>
33#include <linux/interrupt.h>
34#include <linux/bitops.h>
35#include <linux/pci.h>
36#include <linux/module.h>
37#include <linux/atmdev.h>
38#include <linux/sonet.h>
39#include <linux/atm_suni.h>
40#include <linux/dma-mapping.h>
41#include <linux/delay.h>
42#include <asm/io.h>
43#include <asm/string.h>
44#include <asm/page.h>
45#include <asm/irq.h>
46#include <asm/dma.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49#include <asm/atomic.h>
50
51#ifdef CONFIG_ATM_FORE200E_SBA
52#include <asm/idprom.h>
53#include <asm/sbus.h>
54#include <asm/openprom.h>
55#include <asm/oplib.h>
56#include <asm/pgtable.h>
57#endif
58
59#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
60#define FORE200E_USE_TASKLET
61#endif
62
63#if 0 /* enable the debugging code of the buffer supply queues */
64#define FORE200E_BSQ_DEBUG
65#endif
66
67#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
68#define FORE200E_52BYTE_AAL0_SDU
69#endif
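/* For reference: with the option above enabled, every AAL0 SDU exchanged with
 * the driver is 52 bytes long, i.e. a 4-byte ATM cell header followed by the
 * 48-byte cell payload. A minimal sketch of how such a header word is packed,
 * reusing the ATM_HDR_* constants that the rx/tx paths below already rely on
 * (the helper name is illustrative only, it is not part of the driver): */
#if 0
static u32 fore200e_aal0_cell_header(u32 gfc, u32 vpi, u32 vci, u32 pti, u32 clp)
{
    return (gfc << ATM_HDR_GFC_SHIFT) |
           (vpi << ATM_HDR_VPI_SHIFT) |
           (vci << ATM_HDR_VCI_SHIFT) |
           (pti << ATM_HDR_PTI_SHIFT) |
            clp;
}
#endif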
70
71#include "fore200e.h"
72#include "suni.h"
73
74#define FORE200E_VERSION "0.3e"
75
76#define FORE200E "fore200e: "
77
78#if 0 /* override .config */
79#define CONFIG_ATM_FORE200E_DEBUG 1
80#endif
81#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
82#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
83 printk(FORE200E format, ##args); } while (0)
84#else
85#define DPRINTK(level, format, args...) do {} while (0)
86#endif
87
88
89#define FORE200E_ALIGN(addr, alignment) \
90 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
91
92#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
93
94#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
95
96#define FORE200E_NEXT_ENTRY(index, modulo) ((index) = ((index) + 1) % (modulo))
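/* Usage sketch for the alignment and ring-index helpers above (illustrative
 * only, not compiled in): FORE200E_ALIGN() returns the byte offset to add to
 * a kmalloc'ed address in order to reach the requested alignment, while
 * FORE200E_NEXT_ENTRY() advances a queue index modulo the ring size, as done
 * by the cmd/tx/rx/bs queue code below. */
#if 0
static void fore200e_align_sketch(void)
{
    void* raw     = kzalloc(1024 + 32, GFP_KERNEL);
    void* aligned = (char*)raw + FORE200E_ALIGN(raw, 32);  /* 32-byte aligned view of the chunk */
    int   head    = 0;

    FORE200E_NEXT_ENTRY(head, QUEUE_SIZE_CMD);              /* head = (head + 1) % QUEUE_SIZE_CMD */
}
#endif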
97
98#if 1
99#define ASSERT(expr) if (!(expr)) { \
100 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
101 __FUNCTION__, __LINE__, #expr); \
102 panic(FORE200E "%s", __FUNCTION__); \
103 }
104#else
105#define ASSERT(expr) do {} while (0)
106#endif
107
108
109static const struct atmdev_ops fore200e_ops;
110static const struct fore200e_bus fore200e_bus[];
111
112static LIST_HEAD(fore200e_boards);
113
114
115MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
116MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
117MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
118
119
120static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
121 { BUFFER_S1_NBR, BUFFER_L1_NBR },
122 { BUFFER_S2_NBR, BUFFER_L2_NBR }
123};
124
125static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
126 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
127 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
128};
129
130
131#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
132static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
133#endif
134
135
136#if 0 /* currently unused */
137static int
138fore200e_fore2atm_aal(enum fore200e_aal aal)
139{
140 switch(aal) {
141 case FORE200E_AAL0: return ATM_AAL0;
142 case FORE200E_AAL34: return ATM_AAL34;
143 case FORE200E_AAL5: return ATM_AAL5;
144 }
145
146 return -EINVAL;
147}
148#endif
149
150
151static enum fore200e_aal
152fore200e_atm2fore_aal(int aal)
153{
154 switch(aal) {
155 case ATM_AAL0: return FORE200E_AAL0;
156 case ATM_AAL34: return FORE200E_AAL34;
157 case ATM_AAL1:
158 case ATM_AAL2:
159 case ATM_AAL5: return FORE200E_AAL5;
160 }
161
162 return -EINVAL;
163}
164
165
166static char*
167fore200e_irq_itoa(int irq)
168{
169    static char str[8];
170 sprintf(str, "%d", irq);
171 return str;
172}
173
174
175static void*
176fore200e_kmalloc(int size, gfp_t flags)
177{
178    void *chunk = kzalloc(size, flags);
179
180    if (!chunk)
181 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
182
183 return chunk;
184}
185
186
187static void
188fore200e_kfree(void* chunk)
189{
190 kfree(chunk);
191}
192
193
194/* allocate and align a chunk of memory intended to hold the data being exchanged
195 between the driver and the adapter (using streaming DVMA) */
196
197static int
198fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
199{
200 unsigned long offset = 0;
201
202 if (alignment <= sizeof(int))
203 alignment = 0;
204
205 chunk->alloc_size = size + alignment;
206 chunk->align_size = size;
207 chunk->direction = direction;
208
209 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
210 if (chunk->alloc_addr == NULL)
211 return -ENOMEM;
212
213 if (alignment > 0)
214 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
215
216 chunk->align_addr = chunk->alloc_addr + offset;
217
218 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
219
220 return 0;
221}
222
223
224/* free a chunk of memory */
225
226static void
227fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
228{
229 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
230
231 fore200e_kfree(chunk->alloc_addr);
232}
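/* Typical pairing of the two helpers above (sketch only, not compiled in):
 * a chunk is allocated and DMA-mapped once, its dma_addr is handed to the
 * adapter while the host side uses align_addr, and the chunk is eventually
 * unmapped and released through fore200e_chunk_free(). */
#if 0
static int fore200e_chunk_usage_sketch(struct fore200e* fore200e)
{
    struct chunk data;

    if (fore200e_chunk_alloc(fore200e, &data, BUFFER_S1_SIZE, 32, DMA_FROM_DEVICE) < 0)
        return -ENOMEM;

    /* ... pass data.dma_addr to the adapter, read the data via data.align_addr ... */

    fore200e_chunk_free(fore200e, &data);
    return 0;
}
#endif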
233
234
235static void
236fore200e_spin(int msecs)
237{
238 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
239 while (time_before(jiffies, timeout));
240}
241
242
243static int
244fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
245{
246 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
247 int ok;
248
249 mb();
250 do {
251 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
252 break;
253
254 } while (time_before(jiffies, timeout));
255
256#if 1
257 if (!ok) {
258 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
259 *addr, val);
260 }
261#endif
262
263 return ok;
264}
265
266
267static int
268fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
269{
270 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
271 int ok;
272
273 do {
274 if ((ok = (fore200e->bus->read(addr) == val)))
275 break;
276
277 } while (time_before(jiffies, timeout));
278
279#if 1
280 if (!ok) {
281 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
282 fore200e->bus->read(addr), val);
283 }
284#endif
285
286 return ok;
287}
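/* The pollers above implement the host <-> cp command handshake used by
 * fore200e_pca_prom_read(), fore200e_getstats(), fore200e_activate_vcin()
 * and fore200e_set_oc3() further below. The recurring pattern, distilled
 * into a sketch (not compiled in): */
#if 0
static void fore200e_cmd_handshake_sketch(struct fore200e* fore200e)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);    /* claim the next command slot */

    /* ... fill in the command block of entry->cp_entry ... */

    *entry->status = STATUS_PENDING;                    /* arm the status word */
    /* ... write the opcode word last, which triggers the cp ... */

    if (fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400) == 0)
        printk(FORE200E "command timed out\n");

    *entry->status = STATUS_FREE;                       /* release the slot */
}
#endif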
288
289
290static void
291fore200e_free_rx_buf(struct fore200e* fore200e)
292{
293 int scheme, magn, nbr;
294 struct buffer* buffer;
295
296 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
297 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
298
299 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
300
301 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
302
303 struct chunk* data = &buffer[ nbr ].data;
304
305 if (data->alloc_addr != NULL)
306 fore200e_chunk_free(fore200e, data);
307 }
308 }
309 }
310 }
311}
312
313
314static void
315fore200e_uninit_bs_queue(struct fore200e* fore200e)
316{
317 int scheme, magn;
318
319 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
320 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
321
322 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
323 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
324
325 if (status->alloc_addr)
326 fore200e->bus->dma_chunk_free(fore200e, status);
327
328 if (rbd_block->alloc_addr)
329 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
330 }
331 }
332}
333
334
335static int
336fore200e_reset(struct fore200e* fore200e, int diag)
337{
338 int ok;
339
340 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
341
342 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
343
344 fore200e->bus->reset(fore200e);
345
346 if (diag) {
347 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
348 if (ok == 0) {
349
350 printk(FORE200E "device %s self-test failed\n", fore200e->name);
351 return -ENODEV;
352 }
353
354 printk(FORE200E "device %s self-test passed\n", fore200e->name);
355
356 fore200e->state = FORE200E_STATE_RESET;
357 }
358
359 return 0;
360}
361
362
363static void
364fore200e_shutdown(struct fore200e* fore200e)
365{
366 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
367 fore200e->name, fore200e->phys_base,
368 fore200e_irq_itoa(fore200e->irq));
369
370 if (fore200e->state > FORE200E_STATE_RESET) {
371 /* first, reset the board to prevent further interrupts or data transfers */
372 fore200e_reset(fore200e, 0);
373 }
374
375 /* then, release all allocated resources */
376 switch(fore200e->state) {
377
378 case FORE200E_STATE_COMPLETE:
379        kfree(fore200e->stats);
380
381 case FORE200E_STATE_IRQ:
382 free_irq(fore200e->irq, fore200e->atm_dev);
383
384 case FORE200E_STATE_ALLOC_BUF:
385 fore200e_free_rx_buf(fore200e);
386
387 case FORE200E_STATE_INIT_BSQ:
388 fore200e_uninit_bs_queue(fore200e);
389
390 case FORE200E_STATE_INIT_RXQ:
391 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
392 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
393
394 case FORE200E_STATE_INIT_TXQ:
395 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
396 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
397
398 case FORE200E_STATE_INIT_CMDQ:
399 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
400
401 case FORE200E_STATE_INITIALIZE:
402 /* nothing to do for that state */
403
404 case FORE200E_STATE_START_FW:
405 /* nothing to do for that state */
406
407 case FORE200E_STATE_LOAD_FW:
408 /* nothing to do for that state */
409
410 case FORE200E_STATE_RESET:
411 /* nothing to do for that state */
412
413 case FORE200E_STATE_MAP:
414 fore200e->bus->unmap(fore200e);
415
416 case FORE200E_STATE_CONFIGURE:
417 /* nothing to do for that state */
418
419 case FORE200E_STATE_REGISTER:
420 /* XXX shouldn't we *start* by deregistering the device? */
421 atm_dev_deregister(fore200e->atm_dev);
422
423 case FORE200E_STATE_BLANK:
424 /* nothing to do for that state */
425 break;
426 }
427}
428
429
430#ifdef CONFIG_ATM_FORE200E_PCA
431
432static u32 fore200e_pca_read(volatile u32 __iomem *addr)
433{
434 /* on big-endian hosts, the board is configured to convert
435       the endianness of slave RAM accesses */
436 return le32_to_cpu(readl(addr));
437}
438
439
440static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
441{
442 /* on big-endian hosts, the board is configured to convert
443       the endianness of slave RAM accesses */
444 writel(cpu_to_le32(val), addr);
445}
446
447
448static u32
449fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
450{
451 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
452
453 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
454 virt_addr, size, direction, dma_addr);
455
456 return dma_addr;
457}
458
459
460static void
461fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
462{
463 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
464 dma_addr, size, direction);
465
466 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
467}
468
469
470static void
471fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
472{
473 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
474
475 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
476}
477
478static void
479fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
480{
481 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
482
483 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
484}
485
486
487/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
488 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
489
490static int
491fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
492 int size, int nbr, int alignment)
493{
494 /* returned chunks are page-aligned */
495 chunk->alloc_size = size * nbr;
496 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
497 chunk->alloc_size,
498 &chunk->dma_addr);
499
500 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
501 return -ENOMEM;
502
503 chunk->align_addr = chunk->alloc_addr;
504
505 return 0;
506}
507
508
509/* free a DMA consistent chunk of memory */
510
511static void
512fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
513{
514 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
515 chunk->alloc_size,
516 chunk->alloc_addr,
517 chunk->dma_addr);
518}
519
520
521static int
522fore200e_pca_irq_check(struct fore200e* fore200e)
523{
524 /* this is a 1 bit register */
525 int irq_posted = readl(fore200e->regs.pca.psr);
526
527#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
528 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
529 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
530 }
531#endif
532
533 return irq_posted;
534}
535
536
537static void
538fore200e_pca_irq_ack(struct fore200e* fore200e)
539{
540 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
541}
542
543
544static void
545fore200e_pca_reset(struct fore200e* fore200e)
546{
547 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
548 fore200e_spin(10);
549 writel(0, fore200e->regs.pca.hcr);
550}
551
552
553static int __devinit
554fore200e_pca_map(struct fore200e* fore200e)
555{
556 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
557
558 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
559
560 if (fore200e->virt_base == NULL) {
561 printk(FORE200E "can't map device %s\n", fore200e->name);
562 return -EFAULT;
563 }
564
565 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
566
567 /* gain access to the PCA specific registers */
568 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
569 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
570 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
571
572 fore200e->state = FORE200E_STATE_MAP;
573 return 0;
574}
575
576
577static void
578fore200e_pca_unmap(struct fore200e* fore200e)
579{
580 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
581
582 if (fore200e->virt_base != NULL)
583 iounmap(fore200e->virt_base);
584}
585
586
587static int __devinit
588fore200e_pca_configure(struct fore200e* fore200e)
589{
590 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
591 u8 master_ctrl, latency;
592
593 DPRINTK(2, "device %s being configured\n", fore200e->name);
594
595 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
596 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
597 return -EIO;
598 }
599
600 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
601
602 master_ctrl = master_ctrl
603#if defined(__BIG_ENDIAN)
604       /* request the PCA board to convert the endianness of slave RAM accesses */
605 | PCA200E_CTRL_CONVERT_ENDIAN
606#endif
607#if 0
608 | PCA200E_CTRL_DIS_CACHE_RD
609 | PCA200E_CTRL_DIS_WRT_INVAL
610 | PCA200E_CTRL_ENA_CONT_REQ_MODE
611 | PCA200E_CTRL_2_CACHE_WRT_INVAL
612#endif
613 | PCA200E_CTRL_LARGE_PCI_BURSTS;
614
615 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
616
617 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
618 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
619       This may impact the performance of other PCI devices on the same bus, though */
620 latency = 192;
621 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
622
623 fore200e->state = FORE200E_STATE_CONFIGURE;
624 return 0;
625}
626
627
628static int __init
629fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
630{
631 struct host_cmdq* cmdq = &fore200e->host_cmdq;
632 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
633 struct prom_opcode opcode;
634 int ok;
635 u32 prom_dma;
636
637 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
638
639 opcode.opcode = OPCODE_GET_PROM;
640 opcode.pad = 0;
641
642 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
643
644 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
645
646 *entry->status = STATUS_PENDING;
647
648 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
649
650 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
651
652 *entry->status = STATUS_FREE;
653
654 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
655
656 if (ok == 0) {
657 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
658 return -EIO;
659 }
660
661#if defined(__BIG_ENDIAN)
662
663#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
664
665 /* MAC address is stored as little-endian */
666 swap_here(&prom->mac_addr[0]);
667 swap_here(&prom->mac_addr[4]);
668#endif
669
670 return 0;
671}
672
673
674static int
675fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
676{
677 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
678
679 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
680 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
681}
682
683#endif /* CONFIG_ATM_FORE200E_PCA */
684
685
686#ifdef CONFIG_ATM_FORE200E_SBA
687
688static u32
689fore200e_sba_read(volatile u32 __iomem *addr)
690{
691 return sbus_readl(addr);
692}
693
694
695static void
696fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
697{
698 sbus_writel(val, addr);
699}
700
701
702static u32
703fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
704{
705 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
706
707 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
708 virt_addr, size, direction, dma_addr);
709
710 return dma_addr;
711}
712
713
714static void
715fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
716{
717 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
718 dma_addr, size, direction);
719
720 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
721}
722
723
724static void
725fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
726{
727 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
728
729 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
730}
731
732static void
733fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
734{
735 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
736
737 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
738}
739
740
741/* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
742 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
743
744static int
745fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
746 int size, int nbr, int alignment)
747{
748 chunk->alloc_size = chunk->align_size = size * nbr;
749
750 /* returned chunks are page-aligned */
751 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
752 chunk->alloc_size,
753 &chunk->dma_addr);
754
755 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
756 return -ENOMEM;
757
758 chunk->align_addr = chunk->alloc_addr;
759
760 return 0;
761}
762
763
764/* free a DVMA consistent chunk of memory */
765
766static void
767fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
768{
769 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
770 chunk->alloc_size,
771 chunk->alloc_addr,
772 chunk->dma_addr);
773}
774
775
776static void
777fore200e_sba_irq_enable(struct fore200e* fore200e)
778{
779 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
780 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
781}
782
783
784static int
785fore200e_sba_irq_check(struct fore200e* fore200e)
786{
787 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
788}
789
790
791static void
792fore200e_sba_irq_ack(struct fore200e* fore200e)
793{
794 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
795 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
796}
797
798
799static void
800fore200e_sba_reset(struct fore200e* fore200e)
801{
802 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
803 fore200e_spin(10);
804 fore200e->bus->write(0, fore200e->regs.sba.hcr);
805}
806
807
808static int __init
809fore200e_sba_map(struct fore200e* fore200e)
810{
811 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
812 unsigned int bursts;
813
814 /* gain access to the SBA specific registers */
815 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
816 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
817 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
818 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
819
820 if (fore200e->virt_base == NULL) {
821 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
822 return -EFAULT;
823 }
824
825 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
826
827 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
828
829 /* get the supported DVMA burst sizes */
830 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
831
832 if (sbus_can_dma_64bit(sbus_dev))
833 sbus_set_sbus64(sbus_dev, bursts);
834
835 fore200e->state = FORE200E_STATE_MAP;
836 return 0;
837}
838
839
840static void
841fore200e_sba_unmap(struct fore200e* fore200e)
842{
843 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
844 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
845 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
846 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
847}
848
849
850static int __init
851fore200e_sba_configure(struct fore200e* fore200e)
852{
853 fore200e->state = FORE200E_STATE_CONFIGURE;
854 return 0;
855}
856
857
858static struct fore200e* __init
859fore200e_sba_detect(const struct fore200e_bus* bus, int index)
860{
861 struct fore200e* fore200e;
862 struct sbus_bus* sbus_bus;
863 struct sbus_dev* sbus_dev = NULL;
864
865 unsigned int count = 0;
866
867 for_each_sbus (sbus_bus) {
868 for_each_sbusdev (sbus_dev, sbus_bus) {
869 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
870 if (count >= index)
871 goto found;
872 count++;
873 }
874 }
875 }
876 return NULL;
877
878 found:
879 if (sbus_dev->num_registers != 4) {
880 printk(FORE200E "this %s device has %d instead of 4 registers\n",
881 bus->model_name, sbus_dev->num_registers);
882 return NULL;
883 }
884
885 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
886 if (fore200e == NULL)
887 return NULL;
888
889 fore200e->bus = bus;
890 fore200e->bus_dev = sbus_dev;
891 fore200e->irq = sbus_dev->irqs[ 0 ];
892
893 fore200e->phys_base = (unsigned long)sbus_dev;
894
895 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
896
897 return fore200e;
898}
899
900
901static int __init
902fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
903{
904 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
905 int len;
906
907 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
908 if (len < 0)
909 return -EBUSY;
910
911 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
912 if (len < 0)
913 return -EBUSY;
914
915 prom_getproperty(sbus_dev->prom_node, "serialnumber",
916 (char*)&prom->serial_number, sizeof(prom->serial_number));
917
918 prom_getproperty(sbus_dev->prom_node, "promversion",
919 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
920
921 return 0;
922}
923
924
925static int
926fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
927{
928 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
929
930 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
931}
932#endif /* CONFIG_ATM_FORE200E_SBA */
933
934
935static void
936fore200e_tx_irq(struct fore200e* fore200e)
937{
938 struct host_txq* txq = &fore200e->host_txq;
939 struct host_txq_entry* entry;
940 struct atm_vcc* vcc;
941 struct fore200e_vc_map* vc_map;
942
943 if (fore200e->host_txq.txing == 0)
944 return;
945
946 for (;;) {
947
948 entry = &txq->host_entry[ txq->tail ];
949
950 if ((*entry->status & STATUS_COMPLETE) == 0) {
951 break;
952 }
953
954 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
955 entry, txq->tail, entry->vc_map, entry->skb);
956
957 /* free copy of misaligned data */
958        kfree(entry->data);
959
960 /* remove DMA mapping */
961 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
962 DMA_TO_DEVICE);
963
964 vc_map = entry->vc_map;
965
966 /* vcc closed since the time the entry was submitted for tx? */
967 if ((vc_map->vcc == NULL) ||
968 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
969
970 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
971 fore200e->atm_dev->number);
972
973 dev_kfree_skb_any(entry->skb);
974 }
975 else {
976 ASSERT(vc_map->vcc);
977
978 /* vcc closed then immediately re-opened? */
979 if (vc_map->incarn != entry->incarn) {
980
981 /* when a vcc is closed, some PDUs may be still pending in the tx queue.
982 if the same vcc is immediately re-opened, those pending PDUs must
983 not be popped after the completion of their emission, as they refer
984 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
985 would be decremented by the size of the (unrelated) skb, possibly
986 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
987 we thus bind the tx entry to the current incarnation of the vcc
988 when the entry is submitted for tx. When the tx later completes,
989 if the incarnation number of the tx entry does not match the one
990 of the vcc, then this implies that the vcc has been closed then re-opened.
991 we thus just drop the skb here. */
992
993 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
994 fore200e->atm_dev->number);
995
996 dev_kfree_skb_any(entry->skb);
997 }
998 else {
999 vcc = vc_map->vcc;
1000 ASSERT(vcc);
1001
1002 /* notify tx completion */
1003 if (vcc->pop) {
1004 vcc->pop(vcc, entry->skb);
1005 }
1006 else {
1007 dev_kfree_skb_any(entry->skb);
1008 }
1009#if 1
1010 /* race fixed by the above incarnation mechanism, but... */
1011 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1012 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1013 }
1014#endif
1015 /* check error condition */
1016 if (*entry->status & STATUS_ERROR)
1017 atomic_inc(&vcc->stats->tx_err);
1018 else
1019 atomic_inc(&vcc->stats->tx);
1020 }
1021 }
1022
1023 *entry->status = STATUS_FREE;
1024
1025 fore200e->host_txq.txing--;
1026
1027 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1028 }
1029}
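/* The incarnation test above boils down to the following protocol, gathered
 * from fore200e_open() and fore200e_send() further below (sketch only):
 *
 *   vc_map->incarn = ++fore200e->incarn_count;   // fore200e_open(): new incarnation
 *   entry->incarn  = vc_map->incarn;             // fore200e_send(): stamp the tx entry
 *   if (vc_map->incarn != entry->incarn)         // fore200e_tx_irq(): stale entry,
 *       dev_kfree_skb_any(entry->skb);           //   just drop the skb
 */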
1030
1031
1032#ifdef FORE200E_BSQ_DEBUG
1033int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1034{
1035 struct buffer* buffer;
1036 int count = 0;
1037
1038 buffer = bsq->freebuf;
1039 while (buffer) {
1040
1041 if (buffer->supplied) {
1042 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1043 where, scheme, magn, buffer->index);
1044 }
1045
1046 if (buffer->magn != magn) {
1047 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1048 where, scheme, magn, buffer->index, buffer->magn);
1049 }
1050
1051 if (buffer->scheme != scheme) {
1052 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1053 where, scheme, magn, buffer->index, buffer->scheme);
1054 }
1055
1056 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1057 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1058 where, scheme, magn, buffer->index);
1059 }
1060
1061 count++;
1062 buffer = buffer->next;
1063 }
1064
1065 if (count != bsq->freebuf_count) {
1066 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1067 where, scheme, magn, count, bsq->freebuf_count);
1068 }
1069 return 0;
1070}
1071#endif
1072
1073
1074static void
1075fore200e_supply(struct fore200e* fore200e)
1076{
1077 int scheme, magn, i;
1078
1079 struct host_bsq* bsq;
1080 struct host_bsq_entry* entry;
1081 struct buffer* buffer;
1082
1083 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1084 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1085
1086 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1087
1088#ifdef FORE200E_BSQ_DEBUG
1089 bsq_audit(1, bsq, scheme, magn);
1090#endif
1091 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1092
1093 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1094 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1095
1096 entry = &bsq->host_entry[ bsq->head ];
1097
1098 for (i = 0; i < RBD_BLK_SIZE; i++) {
1099
1100 /* take the first buffer in the free buffer list */
1101 buffer = bsq->freebuf;
1102 if (!buffer) {
1103 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1104 scheme, magn, bsq->freebuf_count);
1105 return;
1106 }
1107 bsq->freebuf = buffer->next;
1108
1109#ifdef FORE200E_BSQ_DEBUG
1110 if (buffer->supplied)
1111 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1112 scheme, magn, buffer->index);
1113 buffer->supplied = 1;
1114#endif
1115 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1116 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1117 }
1118
1119 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1120
1121 /* decrease accordingly the number of free rx buffers */
1122 bsq->freebuf_count -= RBD_BLK_SIZE;
1123
1124 *entry->status = STATUS_PENDING;
1125 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1126 }
1127 }
1128 }
1129}
1130
1131
1132static int
1133fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1134{
1135 struct sk_buff* skb;
1136 struct buffer* buffer;
1137 struct fore200e_vcc* fore200e_vcc;
1138 int i, pdu_len = 0;
1139#ifdef FORE200E_52BYTE_AAL0_SDU
1140 u32 cell_header = 0;
1141#endif
1142
1143 ASSERT(vcc);
1144
1145 fore200e_vcc = FORE200E_VCC(vcc);
1146 ASSERT(fore200e_vcc);
1147
1148#ifdef FORE200E_52BYTE_AAL0_SDU
1149 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1150
1151 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1152 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1153 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1154 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1155 rpd->atm_header.clp;
1156 pdu_len = 4;
1157 }
1158#endif
1159
1160 /* compute total PDU length */
1161 for (i = 0; i < rpd->nseg; i++)
1162 pdu_len += rpd->rsd[ i ].length;
1163
1164 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1165 if (skb == NULL) {
1166 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1167
1168 atomic_inc(&vcc->stats->rx_drop);
1169 return -ENOMEM;
1170 }
1171
1172    __net_timestamp(skb);
1173
1174#ifdef FORE200E_52BYTE_AAL0_SDU
1175 if (cell_header) {
1176 *((u32*)skb_put(skb, 4)) = cell_header;
1177 }
1178#endif
1179
1180 /* reassemble segments */
1181 for (i = 0; i < rpd->nseg; i++) {
1182
1183 /* rebuild rx buffer address from rsd handle */
1184 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1185
1186 /* Make device DMA transfer visible to CPU. */
1187 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1188
1189 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1190
1191 /* Now let the device get at it again. */
1192 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1193 }
1194
1195 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1196
1197 if (pdu_len < fore200e_vcc->rx_min_pdu)
1198 fore200e_vcc->rx_min_pdu = pdu_len;
1199 if (pdu_len > fore200e_vcc->rx_max_pdu)
1200 fore200e_vcc->rx_max_pdu = pdu_len;
1201 fore200e_vcc->rx_pdu++;
1202
1203 /* push PDU */
1204 if (atm_charge(vcc, skb->truesize) == 0) {
1205
1206 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1207 vcc->itf, vcc->vpi, vcc->vci);
1208
1209 dev_kfree_skb_any(skb);
1210
1211 atomic_inc(&vcc->stats->rx_drop);
1212 return -ENOMEM;
1213 }
1214
1215 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1216
1217 vcc->push(vcc, skb);
1218 atomic_inc(&vcc->stats->rx);
1219
1220 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1221
1222 return 0;
1223}
1224
1225
1226static void
1227fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1228{
1229 struct host_bsq* bsq;
1230 struct buffer* buffer;
1231 int i;
1232
1233 for (i = 0; i < rpd->nseg; i++) {
1234
1235 /* rebuild rx buffer address from rsd handle */
1236 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1237
1238 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1239
1240#ifdef FORE200E_BSQ_DEBUG
1241 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1242
1243 if (buffer->supplied == 0)
1244 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1245 buffer->scheme, buffer->magn, buffer->index);
1246 buffer->supplied = 0;
1247#endif
1248
1249 /* re-insert the buffer into the free buffer list */
1250 buffer->next = bsq->freebuf;
1251 bsq->freebuf = buffer;
1252
1253 /* then increment the number of free rx buffers */
1254 bsq->freebuf_count++;
1255 }
1256}
1257
1258
1259static void
1260fore200e_rx_irq(struct fore200e* fore200e)
1261{
1262 struct host_rxq* rxq = &fore200e->host_rxq;
1263 struct host_rxq_entry* entry;
1264 struct atm_vcc* vcc;
1265 struct fore200e_vc_map* vc_map;
1266
1267 for (;;) {
1268
1269 entry = &rxq->host_entry[ rxq->head ];
1270
1271 /* no more received PDUs */
1272 if ((*entry->status & STATUS_COMPLETE) == 0)
1273 break;
1274
1275 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1276
1277 if ((vc_map->vcc == NULL) ||
1278 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1279
1280 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1281 fore200e->atm_dev->number,
1282 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1283 }
1284 else {
1285 vcc = vc_map->vcc;
1286 ASSERT(vcc);
1287
1288 if ((*entry->status & STATUS_ERROR) == 0) {
1289
1290 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1291 }
1292 else {
1293 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1294 fore200e->atm_dev->number,
1295 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1296 atomic_inc(&vcc->stats->rx_err);
1297 }
1298 }
1299
1300 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1301
1302 fore200e_collect_rpd(fore200e, entry->rpd);
1303
1304 /* rewrite the rpd address to ack the received PDU */
1305 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1306 *entry->status = STATUS_FREE;
1307
1308 fore200e_supply(fore200e);
1309 }
1310}
1311
1312
1313#ifndef FORE200E_USE_TASKLET
1314static void
1315fore200e_irq(struct fore200e* fore200e)
1316{
1317 unsigned long flags;
1318
1319 spin_lock_irqsave(&fore200e->q_lock, flags);
1320 fore200e_rx_irq(fore200e);
1321 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1322
1323 spin_lock_irqsave(&fore200e->q_lock, flags);
1324 fore200e_tx_irq(fore200e);
1325 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1326}
1327#endif
1328
1329
1330static irqreturn_t
1331fore200e_interrupt(int irq, void* dev)
1332{
1333 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1334
1335 if (fore200e->bus->irq_check(fore200e) == 0) {
1336
1337 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1338 return IRQ_NONE;
1339 }
1340 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1341
1342#ifdef FORE200E_USE_TASKLET
1343 tasklet_schedule(&fore200e->tx_tasklet);
1344 tasklet_schedule(&fore200e->rx_tasklet);
1345#else
1346 fore200e_irq(fore200e);
1347#endif
1348
1349 fore200e->bus->irq_ack(fore200e);
1350 return IRQ_HANDLED;
1351}
1352
1353
1354#ifdef FORE200E_USE_TASKLET
1355static void
1356fore200e_tx_tasklet(unsigned long data)
1357{
1358 struct fore200e* fore200e = (struct fore200e*) data;
1359 unsigned long flags;
1360
1361 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1362
1363 spin_lock_irqsave(&fore200e->q_lock, flags);
1364 fore200e_tx_irq(fore200e);
1365 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1366}
1367
1368
1369static void
1370fore200e_rx_tasklet(unsigned long data)
1371{
1372 struct fore200e* fore200e = (struct fore200e*) data;
1373 unsigned long flags;
1374
1375 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1376
1377 spin_lock_irqsave(&fore200e->q_lock, flags);
1378 fore200e_rx_irq((struct fore200e*) data);
1379 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1380}
1381#endif
1382
1383
1384static int
1385fore200e_select_scheme(struct atm_vcc* vcc)
1386{
1387 /* fairly balance the VCs over (identical) buffer schemes */
1388 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1389
1390 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1391 vcc->itf, vcc->vpi, vcc->vci, scheme);
1392
1393 return scheme;
1394}
1395
1396
1397static int
1398fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1399{
1400 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1401 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1402 struct activate_opcode activ_opcode;
1403 struct deactivate_opcode deactiv_opcode;
1404 struct vpvc vpvc;
1405 int ok;
1406 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1407
1408 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1409
1410 if (activate) {
1411 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1412
1413 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1414 activ_opcode.aal = aal;
1415 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1416 activ_opcode.pad = 0;
1417 }
1418 else {
1419 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1420 deactiv_opcode.pad = 0;
1421 }
1422
1423 vpvc.vci = vcc->vci;
1424 vpvc.vpi = vcc->vpi;
1425
1426 *entry->status = STATUS_PENDING;
1427
1428 if (activate) {
1429
1430#ifdef FORE200E_52BYTE_AAL0_SDU
1431 mtu = 48;
1432#endif
1433 /* the MTU is not used by the cp, except in the case of AAL0 */
1434 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1435 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1436 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1437 }
1438 else {
1439 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1440 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1441 }
1442
1443 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1444
1445 *entry->status = STATUS_FREE;
1446
1447 if (ok == 0) {
1448 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1449 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1450 return -EIO;
1451 }
1452
1453 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1454 activate ? "open" : "clos");
1455
1456 return 0;
1457}
1458
1459
1460#define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1461
1462static void
1463fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1464{
1465 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1466
1467 /* compute the data cells to idle cells ratio from the tx PCR */
1468 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1469 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1470 }
1471 else {
1472 /* disable rate control */
1473 rate->data_cells = rate->idle_cells = 0;
1474 }
1475}
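/* Worked example for the computation above (sketch only, not compiled in):
 * a VC requesting a PCR of half the OC-3 cell rate gets
 * data_cells = (ATM_OC3_PCR / 2) * 255 / ATM_OC3_PCR = 127 and
 * idle_cells = 255 - 127 = 128, i.e. the adapter interleaves roughly one
 * idle cell per data cell within each 255-cell window. */
#if 0
static void fore200e_rate_ctrl_example(void)
{
    struct atm_qos  qos  = { .txtp = { .max_pcr = ATM_OC3_PCR / 2 } };
    struct tpd_rate rate;

    fore200e_rate_ctrl(&qos, &rate);   /* rate.data_cells == 127, rate.idle_cells == 128 */
}
#endif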
1476
1477
1478static int
1479fore200e_open(struct atm_vcc *vcc)
1480{
1481 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1482 struct fore200e_vcc* fore200e_vcc;
1483 struct fore200e_vc_map* vc_map;
1484 unsigned long flags;
1485 int vci = vcc->vci;
1486 short vpi = vcc->vpi;
1487
1488 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1489 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1490
1491 spin_lock_irqsave(&fore200e->q_lock, flags);
1492
1493 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1494 if (vc_map->vcc) {
1495
1496 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1497
1498 printk(FORE200E "VC %d.%d.%d already in use\n",
1499 fore200e->atm_dev->number, vpi, vci);
1500
1501 return -EINVAL;
1502 }
1503
1504 vc_map->vcc = vcc;
1505
1506 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1507
1508 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1509 if (fore200e_vcc == NULL) {
1510 vc_map->vcc = NULL;
1511 return -ENOMEM;
1512 }
1513
1514 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1515 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1516 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1517 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1518 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1519 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1520 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1521
1522 /* pseudo-CBR bandwidth requested? */
1523 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1524
1525 down(&fore200e->rate_sf);
1526 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1527 up(&fore200e->rate_sf);
1528
1529 fore200e_kfree(fore200e_vcc);
1530 vc_map->vcc = NULL;
1531 return -EAGAIN;
1532 }
1533
1534 /* reserve bandwidth */
1535 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1536 up(&fore200e->rate_sf);
1537 }
1538
1539 vcc->itf = vcc->dev->number;
1540
1541 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1542 set_bit(ATM_VF_ADDR, &vcc->flags);
1543
1544 vcc->dev_data = fore200e_vcc;
1545
1546 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1547
1548 vc_map->vcc = NULL;
1549
1550 clear_bit(ATM_VF_ADDR, &vcc->flags);
1551 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1552
1553 vcc->dev_data = NULL;
1554
1555 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1556
1557 fore200e_kfree(fore200e_vcc);
1558 return -EINVAL;
1559 }
1560
1561 /* compute rate control parameters */
1562 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1563
1564 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1565 set_bit(ATM_VF_HASQOS, &vcc->flags);
1566
1567 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1568 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1569 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1570 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1571 }
1572
1573 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1574 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1575 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1576
1577 /* new incarnation of the vcc */
1578 vc_map->incarn = ++fore200e->incarn_count;
1579
1580 /* VC unusable before this flag is set */
1581 set_bit(ATM_VF_READY, &vcc->flags);
1582
1583 return 0;
1584}
1585
1586
1587static void
1588fore200e_close(struct atm_vcc* vcc)
1589{
1590 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1591 struct fore200e_vcc* fore200e_vcc;
1592 struct fore200e_vc_map* vc_map;
1593 unsigned long flags;
1594
1595 ASSERT(vcc);
1596 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1597 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1598
1599 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1600
1601 clear_bit(ATM_VF_READY, &vcc->flags);
1602
1603 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1604
1605 spin_lock_irqsave(&fore200e->q_lock, flags);
1606
1607 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1608
1609 /* the vc is no longer considered as "in use" by fore200e_open() */
1610 vc_map->vcc = NULL;
1611
1612 vcc->itf = vcc->vci = vcc->vpi = 0;
1613
1614 fore200e_vcc = FORE200E_VCC(vcc);
1615 vcc->dev_data = NULL;
1616
1617 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1618
1619 /* release reserved bandwidth, if any */
1620 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1621
1622 down(&fore200e->rate_sf);
1623 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1624 up(&fore200e->rate_sf);
1625
1626 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1627 }
1628
1629 clear_bit(ATM_VF_ADDR, &vcc->flags);
1630 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1631
1632 ASSERT(fore200e_vcc);
1633 fore200e_kfree(fore200e_vcc);
1634}
1635
1636
1637static int
1638fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1639{
1640 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1641 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1642 struct fore200e_vc_map* vc_map;
1643 struct host_txq* txq = &fore200e->host_txq;
1644 struct host_txq_entry* entry;
1645 struct tpd* tpd;
1646 struct tpd_haddr tpd_haddr;
1647 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1648 int tx_copy = 0;
1649 int tx_len = skb->len;
1650 u32* cell_header = NULL;
1651 unsigned char* skb_data;
1652 int skb_len;
1653 unsigned char* data;
1654 unsigned long flags;
1655
1656 ASSERT(vcc);
1657 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1658 ASSERT(fore200e);
1659 ASSERT(fore200e_vcc);
1660
1661 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1662        DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1663 dev_kfree_skb_any(skb);
1664 return -EINVAL;
1665 }
1666
1667#ifdef FORE200E_52BYTE_AAL0_SDU
1668 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1669 cell_header = (u32*) skb->data;
1670 skb_data = skb->data + 4; /* skip 4-byte cell header */
1671 skb_len = tx_len = skb->len - 4;
1672
1673 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1674 }
1675 else
1676#endif
1677 {
1678 skb_data = skb->data;
1679 skb_len = skb->len;
1680 }
1681
1682 if (((unsigned long)skb_data) & 0x3) {
1683
1684 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1685 tx_copy = 1;
1686 tx_len = skb_len;
1687 }
1688
1689 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1690
1691 /* this simply NUKES the PCA board */
1692 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1693 tx_copy = 1;
1694 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1695 }
1696
1697 if (tx_copy) {
1698 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1699 if (data == NULL) {
1700 if (vcc->pop) {
1701 vcc->pop(vcc, skb);
1702 }
1703 else {
1704 dev_kfree_skb_any(skb);
1705 }
1706 return -ENOMEM;
1707 }
1708
1709 memcpy(data, skb_data, skb_len);
1710 if (skb_len < tx_len)
1711 memset(data + skb_len, 0x00, tx_len - skb_len);
1712 }
1713 else {
1714 data = skb_data;
1715 }
1716
1717 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1718 ASSERT(vc_map->vcc == vcc);
1719
1720 retry_here:
1721
1722 spin_lock_irqsave(&fore200e->q_lock, flags);
1723
1724 entry = &txq->host_entry[ txq->head ];
1725
1726 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1727
1728 /* try to free completed tx queue entries */
1729 fore200e_tx_irq(fore200e);
1730
1731 if (*entry->status != STATUS_FREE) {
1732
1733 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1734
1735 /* retry once again? */
1736 if (--retry > 0) {
1737 udelay(50);
1738 goto retry_here;
1739 }
1740
1741 atomic_inc(&vcc->stats->tx_err);
1742
1743 fore200e->tx_sat++;
1744 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1745 fore200e->name, fore200e->cp_queues->heartbeat);
1746 if (vcc->pop) {
1747 vcc->pop(vcc, skb);
1748 }
1749 else {
1750 dev_kfree_skb_any(skb);
1751 }
1752
1753 if (tx_copy)
1754 kfree(data);
1755
1756 return -ENOBUFS;
1757 }
1758 }
1759
1760 entry->incarn = vc_map->incarn;
1761 entry->vc_map = vc_map;
1762 entry->skb = skb;
1763 entry->data = tx_copy ? data : NULL;
1764
1765 tpd = entry->tpd;
1766 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1767 tpd->tsd[ 0 ].length = tx_len;
1768
1769 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1770 txq->txing++;
1771
1772 /* The dma_map call above implies a dma_sync so the device can use it,
1773 * thus no explicit dma_sync call is necessary here.
1774 */
1775
1776 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1777 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1778 tpd->tsd[0].length, skb_len);
1779
1780 if (skb_len < fore200e_vcc->tx_min_pdu)
1781 fore200e_vcc->tx_min_pdu = skb_len;
1782 if (skb_len > fore200e_vcc->tx_max_pdu)
1783 fore200e_vcc->tx_max_pdu = skb_len;
1784 fore200e_vcc->tx_pdu++;
1785
1786 /* set tx rate control information */
1787 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1788 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1789
1790 if (cell_header) {
1791 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1792 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1793 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1794 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1795 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1796 }
1797 else {
1798 /* set the ATM header, common to all cells conveying the PDU */
1799 tpd->atm_header.clp = 0;
1800 tpd->atm_header.plt = 0;
1801 tpd->atm_header.vci = vcc->vci;
1802 tpd->atm_header.vpi = vcc->vpi;
1803 tpd->atm_header.gfc = 0;
1804 }
1805
1806 tpd->spec.length = tx_len;
1807 tpd->spec.nseg = 1;
1808 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1809 tpd->spec.intr = 1;
1810
1811 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1812 tpd_haddr.pad = 0;
1813 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1814
1815 *entry->status = STATUS_PENDING;
1816 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1817
1818 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1819
1820 return 0;
1821}
1822
1823
1824static int
1825fore200e_getstats(struct fore200e* fore200e)
1826{
1827 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1828 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1829 struct stats_opcode opcode;
1830 int ok;
1831 u32 stats_dma_addr;
1832
1833 if (fore200e->stats == NULL) {
1834 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1835 if (fore200e->stats == NULL)
1836 return -ENOMEM;
1837 }
1838
1839 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1840 sizeof(struct stats), DMA_FROM_DEVICE);
1841
1842 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1843
1844 opcode.opcode = OPCODE_GET_STATS;
1845 opcode.pad = 0;
1846
1847 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1848
1849 *entry->status = STATUS_PENDING;
1850
1851 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1852
1853 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1854
1855 *entry->status = STATUS_FREE;
1856
1857 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1858
1859 if (ok == 0) {
1860 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1861 return -EIO;
1862 }
1863
1864 return 0;
1865}
1866
1867
1868static int
1869fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1870{
1871 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1872
1873 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1874 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1875
1876 return -EINVAL;
1877}
1878
1879
1880static int
1881fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1882{
1883 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1884
1885 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1886 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1887
1888 return -EINVAL;
1889}
1890
1891
1892#if 0 /* currently unused */
1893static int
1894fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1895{
1896 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1897 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1898 struct oc3_opcode opcode;
1899 int ok;
1900 u32 oc3_regs_dma_addr;
1901
1902 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1903
1904 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1905
1906 opcode.opcode = OPCODE_GET_OC3;
1907 opcode.reg = 0;
1908 opcode.value = 0;
1909 opcode.mask = 0;
1910
1911 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1912
1913 *entry->status = STATUS_PENDING;
1914
1915 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1916
1917 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1918
1919 *entry->status = STATUS_FREE;
1920
1921 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1922
1923 if (ok == 0) {
1924 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1925 return -EIO;
1926 }
1927
1928 return 0;
1929}
1930#endif
1931
1932
1933static int
1934fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1935{
1936 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1937 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1938 struct oc3_opcode opcode;
1939 int ok;
1940
1941 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1942
1943 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1944
1945 opcode.opcode = OPCODE_SET_OC3;
1946 opcode.reg = reg;
1947 opcode.value = value;
1948 opcode.mask = mask;
1949
1950 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1951
1952 *entry->status = STATUS_PENDING;
1953
1954 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1955
1956 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1957
1958 *entry->status = STATUS_FREE;
1959
1960 if (ok == 0) {
1961 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1962 return -EIO;
1963 }
1964
1965 return 0;
1966}
1967
1968
1969static int
1970fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1971{
1972 u32 mct_value, mct_mask;
1973 int error;
1974
1975 if (!capable(CAP_NET_ADMIN))
1976 return -EPERM;
1977
1978 switch (loop_mode) {
1979
1980 case ATM_LM_NONE:
1981 mct_value = 0;
1982 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1983 break;
1984
1985 case ATM_LM_LOC_PHY:
1986 mct_value = mct_mask = SUNI_MCT_DLE;
1987 break;
1988
1989 case ATM_LM_RMT_PHY:
1990 mct_value = mct_mask = SUNI_MCT_LLE;
1991 break;
1992
1993 default:
1994 return -EINVAL;
1995 }
1996
1997 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1998 if (error == 0)
1999 fore200e->loop_mode = loop_mode;
2000
2001 return error;
2002}
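
/*
 * Loopback summary for fore200e_setloop(): ATM_LM_LOC_PHY sets the SUNI
 * diagnostic loopback bit (SUNI_MCT_DLE), ATM_LM_RMT_PHY sets the line
 * loopback bit (SUNI_MCT_LLE), and ATM_LM_NONE clears both. The mask
 * passed to fore200e_set_oc3() restricts the SUNI_MCT register update to
 * exactly those bits, so the other bits of that register are left untouched.
 */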
2003
2004
2005static inline unsigned int
2006fore200e_swap(unsigned int in)
2007{
2008#if defined(__LITTLE_ENDIAN)
2009 return swab32(in);
2010#else
2011 return in;
2012#endif
2013}
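
/*
 * fore200e_swap() exists because the cp reports its 32-bit counters in a
 * fixed byte order that matches big-endian hosts: on little-endian machines
 * the value must be byte-swapped with swab32(), on big-endian machines it
 * is already in host order and is returned unchanged.
 */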
2014
2015
2016static int
2017fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2018{
2019 struct sonet_stats tmp;
2020
2021 if (fore200e_getstats(fore200e) < 0)
2022 return -EIO;
2023
2024 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2025 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2026 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2027 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2028 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2029 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2030 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2031 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2032 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2033 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2034 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2035 fore200e_swap(fore200e->stats->aal34.cells_received) +
2036 fore200e_swap(fore200e->stats->aal5.cells_received);
2037
2038 if (arg)
2039 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2040
2041 return 0;
2042}
2043
2044
2045static int
2046fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2047{
2048 struct fore200e* fore200e = FORE200E_DEV(dev);
2049
2050 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2051
2052 switch (cmd) {
2053
2054 case SONET_GETSTAT:
2055 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2056
2057 case SONET_GETDIAG:
2058 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2059
2060 case ATM_SETLOOP:
2061 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2062
2063 case ATM_GETLOOP:
2064 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2065
2066 case ATM_QUERYLOOP:
2067 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2068 }
2069
2070 return -ENOSYS; /* not implemented */
2071}
2072
2073
2074static int
2075fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2076{
2077 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2078 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2079
2080 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2081 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2082 return -EINVAL;
2083 }
2084
2085 DPRINTK(2, "change_qos %d.%d.%d, "
2086 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2087 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2088 "available_cell_rate = %u",
2089 vcc->itf, vcc->vpi, vcc->vci,
2090 fore200e_traffic_class[ qos->txtp.traffic_class ],
2091 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2092 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2093 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2094 flags, fore200e->available_cell_rate);
2095
2096 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2097
2098 down(&fore200e->rate_sf);
2099 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2100 up(&fore200e->rate_sf);
2101 return -EAGAIN;
2102 }
2103
2104 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2105 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2106
2107 up(&fore200e->rate_sf);
2108
2109 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2110
2111 /* update rate control parameters */
2112 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2113
2114 set_bit(ATM_VF_HASQOS, &vcc->flags);
2115
2116 return 0;
2117 }
2118
2119 return -EINVAL;
2120}
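
/*
 * The CBR branch of fore200e_change_qos() above does its bandwidth
 * bookkeeping under the rate_sf semaphore: the PCR currently reserved by
 * the VC is first credited back to the pool, then the new PCR is checked
 * against and debited from it. A standalone sketch of that accounting,
 * illustration only (the helper below is hypothetical and not part of the
 * driver):
 */
#if 0
static int fore200e_reserve_pcr(struct fore200e* fore200e, int old_pcr, int new_pcr)
{
    int error = 0;

    down(&fore200e->rate_sf);

    if (fore200e->available_cell_rate + old_pcr < new_pcr)
        error = -EAGAIN;                                 /* not enough spare cell rate */
    else
        fore200e->available_cell_rate += old_pcr - new_pcr;

    up(&fore200e->rate_sf);

    return error;
}
#endif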
2121
2122
2123static int __devinit
2124fore200e_irq_request(struct fore200e* fore200e)
2125{
2126 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2127
2128 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2129 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2130 return -EBUSY;
2131 }
2132
2133 printk(FORE200E "IRQ %s reserved for device %s\n",
2134 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2135
2136#ifdef FORE200E_USE_TASKLET
2137 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2138 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2139#endif
2140
2141 fore200e->state = FORE200E_STATE_IRQ;
2142 return 0;
2143}
2144
2145
2146static int __devinit
2147fore200e_get_esi(struct fore200e* fore200e)
2148{
2149 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2150 int ok, i;
2151
2152 if (!prom)
2153 return -ENOMEM;
2154
2155 ok = fore200e->bus->prom_read(fore200e, prom);
2156 if (ok < 0) {
2157 fore200e_kfree(prom);
2158 return -EBUSY;
2159 }
2160
2161 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2162 fore200e->name,
2163 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2164 prom->serial_number & 0xFFFF,
2165 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2166 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2167
2168 for (i = 0; i < ESI_LEN; i++) {
2169 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2170 }
2171
2172 fore200e_kfree(prom);
2173
2174 return 0;
2175}
2176
2177
2178static int __devinit
2179fore200e_alloc_rx_buf(struct fore200e* fore200e)
2180{
2181 int scheme, magn, nbr, size, i;
2182
2183 struct host_bsq* bsq;
2184 struct buffer* buffer;
2185
2186 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2187 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2188
2189 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2190
2191 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2192 size = fore200e_rx_buf_size[ scheme ][ magn ];
2193
2194 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2195
2196 /* allocate the array of receive buffers */
2197 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2198
2199 if (buffer == NULL)
2200 return -ENOMEM;
2201
2202 bsq->freebuf = NULL;
2203
2204 for (i = 0; i < nbr; i++) {
2205
2206 buffer[ i ].scheme = scheme;
2207 buffer[ i ].magn = magn;
2208#ifdef FORE200E_BSQ_DEBUG
2209 buffer[ i ].index = i;
2210 buffer[ i ].supplied = 0;
2211#endif
2212
2213 /* allocate the receive buffer body */
2214 if (fore200e_chunk_alloc(fore200e,
2215 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2216 DMA_FROM_DEVICE) < 0) {
2217
2218 while (i > 0)
2219 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2220 fore200e_kfree(buffer);
2221
2222 return -ENOMEM;
2223 }
2224
2225 /* insert the buffer into the free buffer list */
2226 buffer[ i ].next = bsq->freebuf;
2227 bsq->freebuf = &buffer[ i ];
2228 }
2229 /* all the buffers are free, initially */
2230 bsq->freebuf_count = nbr;
2231
2232#ifdef FORE200E_BSQ_DEBUG
2233 bsq_audit(3, bsq, scheme, magn);
2234#endif
2235 }
2236 }
2237
2238 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2239 return 0;
2240}
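
/*
 * Receive buffers are kept in a small matrix of free lists, indexed by
 * buffer scheme (BUFFER_SCHEME_NBR of them) and by magnitude (small or
 * large, BUFFER_MAGN_NBR). The per-class counts and sizes come from the
 * fore200e_rx_buf_nbr[][] and fore200e_rx_buf_size[][] tables, each buffer
 * body is allocated DMA-able with the bus-specific alignment, and all the
 * buffers start out chained on the bsq->freebuf list, with freebuf_count
 * tracking how many are available to the buffer supply path.
 */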
2241
2242
2243static int __devinit
2244fore200e_init_bs_queue(struct fore200e* fore200e)
2245{
2246 int scheme, magn, i;
2247
2248 struct host_bsq* bsq;
2249 struct cp_bsq_entry __iomem * cp_entry;
2250
2251 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2252 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2253
2254 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2255
2256 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2257
2258 /* allocate and align the array of status words */
2259 if (fore200e->bus->dma_chunk_alloc(fore200e,
2260 &bsq->status,
2261 sizeof(enum status),
2262 QUEUE_SIZE_BS,
2263 fore200e->bus->status_alignment) < 0) {
2264 return -ENOMEM;
2265 }
2266
2267 /* allocate and align the array of receive buffer descriptors */
2268 if (fore200e->bus->dma_chunk_alloc(fore200e,
2269 &bsq->rbd_block,
2270 sizeof(struct rbd_block),
2271 QUEUE_SIZE_BS,
2272 fore200e->bus->descr_alignment) < 0) {
2273
2274 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2275 return -ENOMEM;
2276 }
2277
2278 /* get the base address of the cp resident buffer supply queue entries */
2279 cp_entry = fore200e->virt_base +
2280 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2281
2282 /* fill the host resident and cp resident buffer supply queue entries */
2283 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2284
2285 bsq->host_entry[ i ].status =
2286 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2287 bsq->host_entry[ i ].rbd_block =
2288 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2289 bsq->host_entry[ i ].rbd_block_dma =
2290 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2291 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2292
2293 *bsq->host_entry[ i ].status = STATUS_FREE;
2294
2295 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2296 &cp_entry[ i ].status_haddr);
2297 }
2298 }
2299 }
2300
2301 fore200e->state = FORE200E_STATE_INIT_BSQ;
2302 return 0;
2303}
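
/*
 * fore200e_init_bs_queue() establishes the layout shared by all four queue
 * types: a host-resident array of status words and an array of descriptors
 * (rbd blocks here, rpds/tpds below), both DMA-addressable, mirrored by
 * cp-resident entries reached through fore200e->virt_base. The host writes
 * the DMA address of each status word into the cp entry so the firmware
 * can update it as work completes. The rx, tx and command queues set up
 * below follow the same pattern.
 */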
2304
2305
2306static int __devinit
2307fore200e_init_rx_queue(struct fore200e* fore200e)
2308{
2309 struct host_rxq* rxq = &fore200e->host_rxq;
2310 struct cp_rxq_entry __iomem * cp_entry;
2311 int i;
2312
2313 DPRINTK(2, "receive queue is being initialized\n");
2314
2315 /* allocate and align the array of status words */
2316 if (fore200e->bus->dma_chunk_alloc(fore200e,
2317 &rxq->status,
2318 sizeof(enum status),
2319 QUEUE_SIZE_RX,
2320 fore200e->bus->status_alignment) < 0) {
2321 return -ENOMEM;
2322 }
2323
2324 /* allocate and align the array of receive PDU descriptors */
2325 if (fore200e->bus->dma_chunk_alloc(fore200e,
2326 &rxq->rpd,
2327 sizeof(struct rpd),
2328 QUEUE_SIZE_RX,
2329 fore200e->bus->descr_alignment) < 0) {
2330
2331 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2332 return -ENOMEM;
2333 }
2334
2335 /* get the base address of the cp resident rx queue entries */
2336 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2337
2338 /* fill the host resident and cp resident rx entries */
2339 for (i=0; i < QUEUE_SIZE_RX; i++) {
2340
2341 rxq->host_entry[ i ].status =
2342 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2343 rxq->host_entry[ i ].rpd =
2344 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2345 rxq->host_entry[ i ].rpd_dma =
2346 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2347 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2348
2349 *rxq->host_entry[ i ].status = STATUS_FREE;
2350
2351 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2352 &cp_entry[ i ].status_haddr);
2353
2354 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2355 &cp_entry[ i ].rpd_haddr);
2356 }
2357
2358 /* set the head entry of the queue */
2359 rxq->head = 0;
2360
2361 fore200e->state = FORE200E_STATE_INIT_RXQ;
2362 return 0;
2363}
2364
2365
2366static int __devinit
2367fore200e_init_tx_queue(struct fore200e* fore200e)
2368{
2369 struct host_txq* txq = &fore200e->host_txq;
2370 struct cp_txq_entry __iomem * cp_entry;
2371 int i;
2372
2373 DPRINTK(2, "transmit queue is being initialized\n");
2374
2375 /* allocate and align the array of status words */
2376 if (fore200e->bus->dma_chunk_alloc(fore200e,
2377 &txq->status,
2378 sizeof(enum status),
2379 QUEUE_SIZE_TX,
2380 fore200e->bus->status_alignment) < 0) {
2381 return -ENOMEM;
2382 }
2383
2384 /* allocate and align the array of transmit PDU descriptors */
2385 if (fore200e->bus->dma_chunk_alloc(fore200e,
2386 &txq->tpd,
2387 sizeof(struct tpd),
2388 QUEUE_SIZE_TX,
2389 fore200e->bus->descr_alignment) < 0) {
2390
2391 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2392 return -ENOMEM;
2393 }
2394
2395 /* get the base address of the cp resident tx queue entries */
2396 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2397
2398 /* fill the host resident and cp resident tx entries */
2399 for (i=0; i < QUEUE_SIZE_TX; i++) {
2400
2401 txq->host_entry[ i ].status =
2402 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2403 txq->host_entry[ i ].tpd =
2404 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2405 txq->host_entry[ i ].tpd_dma =
2406 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2407 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2408
2409 *txq->host_entry[ i ].status = STATUS_FREE;
2410
2411 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2412 &cp_entry[ i ].status_haddr);
2413
2414 /* although there is a one-to-one mapping of tx queue entries and tpds,
2415 we do not write the DMA (physical) base address of each tpd into the
2416 related cp resident entry here: the cp relies on that very write to
2417 detect that a new pdu has been submitted for tx */
2418 }
2419
2420 /* set the head and tail entries of the queue */
2421 txq->head = 0;
2422 txq->tail = 0;
2423
2424 fore200e->state = FORE200E_STATE_INIT_TXQ;
2425 return 0;
2426}
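
/*
 * Unlike the rx and command queues, which only track a head index, the
 * transmit queue keeps both head and tail: new tpds are queued at head,
 * while tail marks the oldest entry whose transmission has not yet been
 * acknowledged by the cp, so both indices are reset here.
 */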
2427
2428
2429static int __devinit
2430fore200e_init_cmd_queue(struct fore200e* fore200e)
2431{
2432 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2433 struct cp_cmdq_entry __iomem * cp_entry;
2434 int i;
2435
2436 DPRINTK(2, "command queue is being initialized\n");
2437
2438 /* allocate and align the array of status words */
2439 if (fore200e->bus->dma_chunk_alloc(fore200e,
2440 &cmdq->status,
2441 sizeof(enum status),
2442 QUEUE_SIZE_CMD,
2443 fore200e->bus->status_alignment) < 0) {
2444 return -ENOMEM;
2445 }
2446
2447 /* get the base address of the cp resident cmd queue entries */
2448 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2449
2450 /* fill the host resident and cp resident cmd entries */
2451 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2452
2453 cmdq->host_entry[ i ].status =
2454 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2455 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2456
2457 *cmdq->host_entry[ i ].status = STATUS_FREE;
2458
2459 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2460 &cp_entry[ i ].status_haddr);
2461 }
2462
2463 /* set the head entry of the queue */
2464 cmdq->head = 0;
2465
2466 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2467 return 0;
2468}
2469
2470
2471static void __init
2472fore200e_param_bs_queue(struct fore200e* fore200e,
2473 enum buffer_scheme scheme, enum buffer_magn magn,
2474 int queue_length, int pool_size, int supply_blksize)
2475{
2476 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2477
2478 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2479 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2480 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2481 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2482}
2483
2484
2485static int __devinit
2486fore200e_initialize(struct fore200e* fore200e)
2487{
2488 struct cp_queues __iomem * cpq;
2489 int ok, scheme, magn;
2490
2491 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2492
2493 init_MUTEX(&fore200e->rate_sf);
2494 spin_lock_init(&fore200e->q_lock);
2495
2496 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2497
2498 /* enable cp to host interrupts */
2499 fore200e->bus->write(1, &cpq->imask);
2500
2501 if (fore200e->bus->irq_enable)
2502 fore200e->bus->irq_enable(fore200e);
2503
2504 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2505
2506 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2507 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2508 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2509
2510 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2511 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2512
2513 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2514 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2515 fore200e_param_bs_queue(fore200e, scheme, magn,
2516 QUEUE_SIZE_BS,
2517 fore200e_rx_buf_nbr[ scheme ][ magn ],
2518 RBD_BLK_SIZE);
2519
2520 /* issue the initialize command */
2521 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2522 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2523
2524 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2525 if (ok == 0) {
2526 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2527 return -ENODEV;
2528 }
2529
2530 printk(FORE200E "device %s initialized\n", fore200e->name);
2531
2532 fore200e->state = FORE200E_STATE_INITIALIZE;
2533 return 0;
2534}
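
/*
 * fore200e_initialize() is the single "initialize" command exchanged with
 * the firmware: the operating parameters (connection count, queue lengths,
 * rsd/tsd extensions, buffer supply parameters) are written into the
 * cp-resident init block first, then the status word is armed and
 * OPCODE_INITIALIZE is written, and the host polls the init status
 * (fore200e_io_poll with a limit of 3000) for STATUS_COMPLETE before
 * declaring the board usable.
 */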
2535
2536
2537static void __devinit
2538fore200e_monitor_putc(struct fore200e* fore200e, char c)
2539{
2540 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2541
2542#if 0
2543 printk("%c", c);
2544#endif
2545 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2546}
2547
2548
2549static int __devinit
2550fore200e_monitor_getc(struct fore200e* fore200e)
2551{
2552 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2553 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2554 int c;
2555
2556 while (time_before(jiffies, timeout)) {
2557
2558 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2559
2560 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2561
2562 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2563#if 0
2564 printk("%c", c & 0xFF);
2565#endif
2566 return c & 0xFF;
2567 }
2568 }
2569
2570 return -1;
2571}
2572
2573
2574static void __devinit
2575fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2576{
2577 while (*str) {
2578
2579 /* the i960 monitor doesn't accept any new character if it has something to say */
2580 while (fore200e_monitor_getc(fore200e) >= 0);
2581
2582 fore200e_monitor_putc(fore200e, *str++);
2583 }
2584
2585 while (fore200e_monitor_getc(fore200e) >= 0);
2586}
2587
2588
2589static int __devinit
2590fore200e_start_fw(struct fore200e* fore200e)
2591{
2592 int ok;
2593 char cmd[ 48 ];
2594 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2595
2596 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2597
2598#if defined(__sparc_v9__)
2599 /* reported to be required by SBA cards on some sparc64 hosts */
2600 fore200e_spin(100);
2601#endif
2602
2603 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2604
2605 fore200e_monitor_puts(fore200e, cmd);
2606
2607 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2608 if (ok == 0) {
2609 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2610 return -ENODEV;
2611 }
2612
2613 printk(FORE200E "device %s firmware started\n", fore200e->name);
2614
2615 fore200e->state = FORE200E_STATE_START_FW;
2616 return 0;
2617}
2618
2619
2620static int __devinit
2621fore200e_load_fw(struct fore200e* fore200e)
2622{
2623 u32* fw_data = (u32*) fore200e->bus->fw_data;
2624 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2625
2626 struct fw_header* fw_header = (struct fw_header*) fw_data;
2627
2628 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2629
2630 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2631 fore200e->name, load_addr, fw_size);
2632
2633 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2634 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2635 return -ENODEV;
2636 }
2637
2638 for (; fw_size--; fw_data++, load_addr++)
2639 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2640
2641 fore200e->state = FORE200E_STATE_LOAD_FW;
2642 return 0;
2643}
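
/*
 * Firmware image layout, as used above: the embedded image starts with a
 * struct fw_header whose fields are little-endian (a magic number checked
 * against FW_HEADER_MAGIC, a load_offset and a start_offset).
 * fore200e_load_fw() copies the whole image, one le32 word at a time, to
 * virt_base + load_offset through the bus write op, and fore200e_start_fw()
 * then asks the i960 monitor to jump to start_offset with a "go <address>"
 * command sent over the soft UART.
 */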
2644
2645
2646static int __devinit
2647fore200e_register(struct fore200e* fore200e)
2648{
2649 struct atm_dev* atm_dev;
2650
2651 DPRINTK(2, "device %s being registered\n", fore200e->name);
2652
2653 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2654 NULL);
2655 if (atm_dev == NULL) {
2656 printk(FORE200E "unable to register device %s\n", fore200e->name);
2657 return -ENODEV;
2658 }
2659
2660 atm_dev->dev_data = fore200e;
2661 fore200e->atm_dev = atm_dev;
2662
2663 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2664 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2665
2666 fore200e->available_cell_rate = ATM_OC3_PCR;
2667
2668 fore200e->state = FORE200E_STATE_REGISTER;
2669 return 0;
2670}
2671
2672
2673static int __devinit
2674fore200e_init(struct fore200e* fore200e)
2675{
2676 if (fore200e_register(fore200e) < 0)
2677 return -ENODEV;
2678
2679 if (fore200e->bus->configure(fore200e) < 0)
2680 return -ENODEV;
2681
2682 if (fore200e->bus->map(fore200e) < 0)
2683 return -ENODEV;
2684
2685 if (fore200e_reset(fore200e, 1) < 0)
2686 return -ENODEV;
2687
2688 if (fore200e_load_fw(fore200e) < 0)
2689 return -ENODEV;
2690
2691 if (fore200e_start_fw(fore200e) < 0)
2692 return -ENODEV;
2693
2694 if (fore200e_initialize(fore200e) < 0)
2695 return -ENODEV;
2696
2697 if (fore200e_init_cmd_queue(fore200e) < 0)
2698 return -ENOMEM;
2699
2700 if (fore200e_init_tx_queue(fore200e) < 0)
2701 return -ENOMEM;
2702
2703 if (fore200e_init_rx_queue(fore200e) < 0)
2704 return -ENOMEM;
2705
2706 if (fore200e_init_bs_queue(fore200e) < 0)
2707 return -ENOMEM;
2708
2709 if (fore200e_alloc_rx_buf(fore200e) < 0)
2710 return -ENOMEM;
2711
2712 if (fore200e_get_esi(fore200e) < 0)
2713 return -EIO;
2714
2715 if (fore200e_irq_request(fore200e) < 0)
2716 return -EBUSY;
2717
2718 fore200e_supply(fore200e);
2719
2720 /* all done, board initialization is now complete */
2721 fore200e->state = FORE200E_STATE_COMPLETE;
2722 return 0;
2723}
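
/*
 * fore200e_init() is the bring-up sequence proper: register with the ATM
 * layer, configure and map the board, reset it, load and start the
 * firmware, initialize it, create the command/tx/rx/buffer-supply queues,
 * allocate the receive buffers, read the ESI from the PROM, hook the IRQ,
 * and finally hand the initial buffers to the cp with fore200e_supply().
 * fore200e->state is advanced after every successful step, which lets the
 * error paths (they call fore200e_shutdown()) unwind a partially
 * initialized board.
 */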
2724
2725
2726static int __devinit
2727fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2728{
2729 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2730 struct fore200e* fore200e;
2731 int err = 0;
2732 static int index = 0;
2733
2734 if (pci_enable_device(pci_dev)) {
2735 err = -EINVAL;
2736 goto out;
2737 }
2738
2739 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2740 if (fore200e == NULL) {
2741 err = -ENOMEM;
2742 goto out_disable;
2743 }
2744
2745 fore200e->bus = bus;
2746 fore200e->bus_dev = pci_dev;
2747 fore200e->irq = pci_dev->irq;
2748 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2749
2750 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2751
2752 pci_set_master(pci_dev);
2753
2754 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2755 fore200e->bus->model_name,
2756 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2757
2758 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2759
2760 err = fore200e_init(fore200e);
2761 if (err < 0) {
2762 fore200e_shutdown(fore200e);
2763 goto out_free;
2764 }
2765
2766 ++index;
2767 pci_set_drvdata(pci_dev, fore200e);
2768
2769out:
2770 return err;
2771
2772out_free:
2773 kfree(fore200e);
2774out_disable:
2775 pci_disable_device(pci_dev);
2776 goto out;
2777}
2778
2779
2780static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2781{
2782 struct fore200e *fore200e;
2783
2784 fore200e = pci_get_drvdata(pci_dev);
2785
2786 fore200e_shutdown(fore200e);
2787 kfree(fore200e);
2788 pci_disable_device(pci_dev);
2789}
2790
2791
2792#ifdef CONFIG_ATM_FORE200E_PCA
2793static struct pci_device_id fore200e_pca_tbl[] = {
2794 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2795 0, 0, (unsigned long) &fore200e_bus[0] },
2796 { 0, }
2797};
2798
2799MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2800
2801static struct pci_driver fore200e_pca_driver = {
2802 .name = "fore_200e",
2803 .probe = fore200e_pca_detect,
2804 .remove = __devexit_p(fore200e_pca_remove_one),
2805 .id_table = fore200e_pca_tbl,
2806};
2807#endif
2808
2809
2810static int __init
2811fore200e_module_init(void)
2812{
2813 const struct fore200e_bus* bus;
2814 struct fore200e* fore200e;
2815 int index;
2816
2817 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2818
2819 /* for each configured bus interface */
2820 for (bus = fore200e_bus; bus->model_name; bus++) {
2821
2822 /* detect all boards present on that bus */
2823 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2824
2825 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2826 fore200e->bus->model_name,
2827 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2828
2829 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2830
2831 if (fore200e_init(fore200e) < 0) {
2832
2833 fore200e_shutdown(fore200e);
2834 break;
2835 }
2836
2837 list_add(&fore200e->entry, &fore200e_boards);
2838 }
2839 }
2840
2841#ifdef CONFIG_ATM_FORE200E_PCA
2842 if (!pci_register_driver(&fore200e_pca_driver))
2843 return 0;
2844#endif
2845
2846 if (!list_empty(&fore200e_boards))
2847 return 0;
2848
2849 return -ENODEV;
2850}
2851
2852
2853static void __exit
2854fore200e_module_cleanup(void)
2855{
2856 struct fore200e *fore200e, *next;
2857
2858#ifdef CONFIG_ATM_FORE200E_PCA
2859 pci_unregister_driver(&fore200e_pca_driver);
2860#endif
2861
2862 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2863 fore200e_shutdown(fore200e);
2864 kfree(fore200e);
2865 }
2866 DPRINTK(1, "module being removed\n");
2867}
2868
2869
2870static int
2871fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2872{
2873 struct fore200e* fore200e = FORE200E_DEV(dev);
2874 struct fore200e_vcc* fore200e_vcc;
2875 struct atm_vcc* vcc;
2876 int i, len, left = *pos;
2877 unsigned long flags;
2878
2879 if (!left--) {
2880
2881 if (fore200e_getstats(fore200e) < 0)
2882 return -EIO;
2883
2884 len = sprintf(page,"\n"
2885 " device:\n"
2886 " internal name:\t\t%s\n", fore200e->name);
2887
2888 /* print bus-specific information */
2889 if (fore200e->bus->proc_read)
2890 len += fore200e->bus->proc_read(fore200e, page + len);
2891
2892 len += sprintf(page + len,
2893 " interrupt line:\t\t%s\n"
2894 " physical base address:\t0x%p\n"
2895 " virtual base address:\t0x%p\n"
2896 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2897 " board serial number:\t\t%d\n\n",
2898 fore200e_irq_itoa(fore200e->irq),
2899 (void*)fore200e->phys_base,
2900 fore200e->virt_base,
2901 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2902 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2903 fore200e->esi[4] * 256 + fore200e->esi[5]);
2904
2905 return len;
2906 }
2907
2908 if (!left--)
2909 return sprintf(page,
2910 " free small bufs, scheme 1:\t%d\n"
2911 " free large bufs, scheme 1:\t%d\n"
2912 " free small bufs, scheme 2:\t%d\n"
2913 " free large bufs, scheme 2:\t%d\n",
2914 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2915 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2916 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2917 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2918
2919 if (!left--) {
2920 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2921
2922 len = sprintf(page,"\n\n"
2923 " cell processor:\n"
2924 " heartbeat state:\t\t");
2925
2926 if (hb >> 16 != 0xDEAD)
2927 len += sprintf(page + len, "0x%08x\n", hb);
2928 else
2929 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2930
2931 return len;
2932 }
2933
2934 if (!left--) {
2935 static const char* media_name[] = {
2936 "unshielded twisted pair",
2937 "multimode optical fiber ST",
2938 "multimode optical fiber SC",
2939 "single-mode optical fiber ST",
2940 "single-mode optical fiber SC",
2941 "unknown"
2942 };
2943
2944 static const char* oc3_mode[] = {
2945 "normal operation",
2946 "diagnostic loopback",
2947 "line loopback",
2948 "unknown"
2949 };
2950
2951 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2952 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2953 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2954 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2955 u32 oc3_index;
2956
2957 if (media_index > 4)
2958 media_index = 5;
2959
2960 switch (fore200e->loop_mode) {
2961 case ATM_LM_NONE: oc3_index = 0;
2962 break;
2963 case ATM_LM_LOC_PHY: oc3_index = 1;
2964 break;
2965 case ATM_LM_RMT_PHY: oc3_index = 2;
2966 break;
2967 default: oc3_index = 3;
2968 }
2969
2970 return sprintf(page,
2971 " firmware release:\t\t%d.%d.%d\n"
2972 " monitor release:\t\t%d.%d\n"
2973 " media type:\t\t\t%s\n"
2974 " OC-3 revision:\t\t0x%x\n"
2975 " OC-3 mode:\t\t\t%s",
2976 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2977 mon960_release >> 16, mon960_release << 16 >> 16,
2978 media_name[ media_index ],
2979 oc3_revision,
2980 oc3_mode[ oc3_index ]);
2981 }
2982
2983 if (!left--) {
2984 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2985
2986 return sprintf(page,
2987 "\n\n"
2988 " monitor:\n"
2989 " version number:\t\t%d\n"
2990 " boot status word:\t\t0x%08x\n",
2991 fore200e->bus->read(&cp_monitor->mon_version),
2992 fore200e->bus->read(&cp_monitor->bstat));
2993 }
2994
2995 if (!left--)
2996 return sprintf(page,
2997 "\n"
2998 " device statistics:\n"
2999 " 4b5b:\n"
3000 " crc_header_errors:\t\t%10u\n"
3001 " framing_errors:\t\t%10u\n",
3002 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3003 fore200e_swap(fore200e->stats->phy.framing_errors));
3004
3005 if (!left--)
3006 return sprintf(page, "\n"
3007 " OC-3:\n"
3008 " section_bip8_errors:\t%10u\n"
3009 " path_bip8_errors:\t\t%10u\n"
3010 " line_bip24_errors:\t\t%10u\n"
3011 " line_febe_errors:\t\t%10u\n"
3012 " path_febe_errors:\t\t%10u\n"
3013 " corr_hcs_errors:\t\t%10u\n"
3014 " ucorr_hcs_errors:\t\t%10u\n",
3015 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3016 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3017 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3018 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3019 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3020 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3021 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3022
3023 if (!left--)
3024 return sprintf(page,"\n"
3025 " ATM:\t\t\t\t cells\n"
3026 " TX:\t\t\t%10u\n"
3027 " RX:\t\t\t%10u\n"
3028 " vpi out of range:\t\t%10u\n"
3029 " vpi no conn:\t\t%10u\n"
3030 " vci out of range:\t\t%10u\n"
3031 " vci no conn:\t\t%10u\n",
3032 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3033 fore200e_swap(fore200e->stats->atm.cells_received),
3034 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3035 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3036 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3037 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3038
3039 if (!left--)
3040 return sprintf(page,"\n"
3041 " AAL0:\t\t\t cells\n"
3042 " TX:\t\t\t%10u\n"
3043 " RX:\t\t\t%10u\n"
3044 " dropped:\t\t\t%10u\n",
3045 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3046 fore200e_swap(fore200e->stats->aal0.cells_received),
3047 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3048
3049 if (!left--)
3050 return sprintf(page,"\n"
3051 " AAL3/4:\n"
3052 " SAR sublayer:\t\t cells\n"
3053 " TX:\t\t\t%10u\n"
3054 " RX:\t\t\t%10u\n"
3055 " dropped:\t\t\t%10u\n"
3056 " CRC errors:\t\t%10u\n"
3057 " protocol errors:\t\t%10u\n\n"
3058 " CS sublayer:\t\t PDUs\n"
3059 " TX:\t\t\t%10u\n"
3060 " RX:\t\t\t%10u\n"
3061 " dropped:\t\t\t%10u\n"
3062 " protocol errors:\t\t%10u\n",
3063 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3064 fore200e_swap(fore200e->stats->aal34.cells_received),
3065 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3066 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3067 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3068 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3069 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3070 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3071 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3072
3073 if (!left--)
3074 return sprintf(page,"\n"
3075 " AAL5:\n"
3076 " SAR sublayer:\t\t cells\n"
3077 " TX:\t\t\t%10u\n"
3078 " RX:\t\t\t%10u\n"
3079 " dropped:\t\t\t%10u\n"
3080 " congestions:\t\t%10u\n\n"
3081 " CS sublayer:\t\t PDUs\n"
3082 " TX:\t\t\t%10u\n"
3083 " RX:\t\t\t%10u\n"
3084 " dropped:\t\t\t%10u\n"
3085 " CRC errors:\t\t%10u\n"
3086 " protocol errors:\t\t%10u\n",
3087 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3088 fore200e_swap(fore200e->stats->aal5.cells_received),
3089 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3090 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3091 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3092 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3093 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3094 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3095 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3096
3097 if (!left--)
3098 return sprintf(page,"\n"
3099 " AUX:\t\t allocation failures\n"
3100 " small b1:\t\t\t%10u\n"
3101 " large b1:\t\t\t%10u\n"
3102 " small b2:\t\t\t%10u\n"
3103 " large b2:\t\t\t%10u\n"
3104 " RX PDUs:\t\t\t%10u\n"
3105 " TX PDUs:\t\t\t%10lu\n",
3106 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3107 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3108 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3109 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3110 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3111 fore200e->tx_sat);
3112
3113 if (!left--)
3114 return sprintf(page,"\n"
3115 " receive carrier:\t\t\t%s\n",
3116 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3117
3118 if (!left--) {
3119 return sprintf(page,"\n"
3120 " VCCs:\n address VPI VCI AAL "
3121 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3122 }
3123
3124 for (i = 0; i < NBR_CONNECT; i++) {
3125
3126 vcc = fore200e->vc_map[i].vcc;
3127
3128 if (vcc == NULL)
3129 continue;
3130
3131 spin_lock_irqsave(&fore200e->q_lock, flags);
3132
3133 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3134
3135 fore200e_vcc = FORE200E_VCC(vcc);
3136 ASSERT(fore200e_vcc);
3137
3138 len = sprintf(page,
3139 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3140 (u32)(unsigned long)vcc,
3141 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3142 fore200e_vcc->tx_pdu,
3143 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3144 fore200e_vcc->tx_max_pdu,
3145 fore200e_vcc->rx_pdu,
3146 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3147 fore200e_vcc->rx_max_pdu);
3148
3149 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3150 return len;
3151 }
3152
3153 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3154 }
3155
3156 return 0;
3157}
3158
3159module_init(fore200e_module_init);
3160module_exit(fore200e_module_cleanup);
3161
3162
3163static const struct atmdev_ops fore200e_ops =
3164{
3165 .open = fore200e_open,
3166 .close = fore200e_close,
3167 .ioctl = fore200e_ioctl,
3168 .getsockopt = fore200e_getsockopt,
3169 .setsockopt = fore200e_setsockopt,
3170 .send = fore200e_send,
3171 .change_qos = fore200e_change_qos,
3172 .proc_read = fore200e_proc_read,
3173 .owner = THIS_MODULE
3174};
3175
3176
3177#ifdef CONFIG_ATM_FORE200E_PCA
3178extern const unsigned char _fore200e_pca_fw_data[];
3179extern const unsigned int _fore200e_pca_fw_size;
3180#endif
3181#ifdef CONFIG_ATM_FORE200E_SBA
3182extern const unsigned char _fore200e_sba_fw_data[];
3183extern const unsigned int _fore200e_sba_fw_size;
3184#endif
3185
3186static const struct fore200e_bus fore200e_bus[] = {
3187#ifdef CONFIG_ATM_FORE200E_PCA
3188 { "PCA-200E", "pca200e", 32, 4, 32,
3189 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3190 fore200e_pca_read,
3191 fore200e_pca_write,
3192 fore200e_pca_dma_map,
3193 fore200e_pca_dma_unmap,
3194 fore200e_pca_dma_sync_for_cpu,
3195 fore200e_pca_dma_sync_for_device,
3196 fore200e_pca_dma_chunk_alloc,
3197 fore200e_pca_dma_chunk_free,
3198 NULL,
3199 fore200e_pca_configure,
3200 fore200e_pca_map,
3201 fore200e_pca_reset,
3202 fore200e_pca_prom_read,
3203 fore200e_pca_unmap,
3204 NULL,
3205 fore200e_pca_irq_check,
3206 fore200e_pca_irq_ack,
3207 fore200e_pca_proc_read,
3208 },
3209#endif
3210#ifdef CONFIG_ATM_FORE200E_SBA
3211 { "SBA-200E", "sba200e", 32, 64, 32,
3212 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3213 fore200e_sba_read,
3214 fore200e_sba_write,
3215 fore200e_sba_dma_map,
3216 fore200e_sba_dma_unmap,
3217 fore200e_sba_dma_sync_for_cpu,
3218 fore200e_sba_dma_sync_for_device,
3219 fore200e_sba_dma_chunk_alloc,
3220 fore200e_sba_dma_chunk_free,
3221 fore200e_sba_detect,
3222 fore200e_sba_configure,
3223 fore200e_sba_map,
3224 fore200e_sba_reset,
3225 fore200e_sba_prom_read,
3226 fore200e_sba_unmap,
3227 fore200e_sba_irq_enable,
3228 fore200e_sba_irq_check,
3229 fore200e_sba_irq_ack,
3230 fore200e_sba_proc_read,
3231 },
3232#endif
3233 {}
3234};
3235
3236#ifdef MODULE_LICENSE
3237MODULE_LICENSE("GPL");
3238#endif