/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF	2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF	6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE	1576	/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

#define HW_PI		{ "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			  0, 8, 1843200, 3686400 }
#define HW_PI2		{ "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			  0, 8, 3686400, 7372800 }
#define HW_TWIN		{ "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			  0, 4, 6144000, 6144000 }
#define HW_S5		{ "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			  0, 8, 4915200, 9830400 }

#define HARDWARE	{ HW_PI, HW_PI2, HW_TWIN, HW_S5 }
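
/* Note: the HW_* initializers above fill struct scc_hardware (defined below)
   in declaration order: name, io_region, io_delta, io_size, num_devs,
   scc_offset, tmr_offset, tmr_hz, pclk_hz. */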

#define TMR_0_HZ	25600	/* Frequency of timer 0 */

#define TYPE_PI		0
#define TYPE_PI2	1
#define TYPE_TWIN	2
#define TYPE_S5		3
#define NUM_TYPES	4

#define MAX_NUM_DEVS	32


/* SCC chips supported */

#define Z8530		0
#define Z85C30		1
#define Z85230		2

#define CHIPNAMES	{ "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD	0x00
#define SCCB_DATA	0x01
#define SCCA_CMD	0x02
#define SCCA_DATA	0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0	0x00
#define TMR_CNT1	0x01
#define TMR_CNT2	0x02
#define TMR_CTRL	0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK	0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG	0x08
#define TWIN_CLR_TMR1	0x09
#define TWIN_CLR_TMR2	0x0a
#define TWIN_SPARE_1	0x0b
#define TWIN_DMA_CFG	0x08
#define TWIN_SERIAL_CFG	0x09
#define TWIN_DMA_CLR_FF	0x0a
#define TWIN_SPARE_2	0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK	0x01
#define TWIN_TMR1_MSK	0x02
#define TWIN_TMR2_MSK	0x04
#define TWIN_INT_MSK	0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON	0x01
#define TWIN_DTRB_ON	0x02
#define TWIN_EXTCLKA	0x04
#define TWIN_EXTCLKB	0x08
#define TWIN_LOOPA_ON	0x10
#define TWIN_LOOPB_ON	0x20
#define TWIN_EI		0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1	0x08
#define TWIN_DMA_HDX_R1	0x0a
#define TWIN_DMA_HDX_T3	0x14
#define TWIN_DMA_HDX_R3	0x16
#define TWIN_DMA_FDX_T3R1	0x1b
#define TWIN_DMA_FDX_T1R3	0x1d


/* Status values */

#define IDLE		0
#define TX_HEAD		1
#define TX_DATA		2
#define TX_PAUSE	3
#define TX_TAIL		4
#define RTS_OFF		5
#define WAIT		6
#define DCD_ON		7
#define RX_ON		8
#define DCD_OFF		9


/* Ioctls */

#define SIOCGSCCPARAM	SIOCDEVPRIVATE
#define SIOCSSCCPARAM	(SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
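
/* All fields marked [1/TMR_0_HZ] are counted by the 8253/8254 timer running
   at TMR_0_HZ = 25600 Hz; e.g. a txdelay of 2560 ticks corresponds to
   2560 / 25600 s = 100 ms.  (Illustrative arithmetic only; 2560 is not a
   recommended default.) */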

struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;
	struct net_device_stats stats;
	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in cleanup_module(). */
static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
MODULE_LICENSE("GPL");
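
/*
 * Usage note (not part of the original source): the "io" module parameter
 * above lets the user bypass autoprobing by listing card base addresses,
 * e.g. for an Ottawa PI at its default address:
 *
 *	modprobe dmascc io=0x300
 *
 * Each address must equal hw[].io_region + n * hw[].io_delta for one of the
 * supported card types, or dmascc_init() below will ignore it.
 */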

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs
				    && hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

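		/* Probe rationale (explanatory note, not from the original
		   source): timer 1 was loaded with TMR_0_HZ / HZ * 10 counts
		   and is clocked from timer 0 at TMR_0_HZ on these cards, so
		   on real hardware it runs for 10 / HZ seconds, i.e. about
		   10 jiffies.  The loop below samples it until it stops (or
		   wraps), and the evaluation step accepts a card only if the
		   measured delay is 9..11 jiffies. */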
		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0
					    || t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out;
	}


	info->dev[0] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);
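	/* Summary of the probe above: no readable WR7' => Z8530; WR7' present
	   and TX buffer still empty after one byte => Z85230 (ESCC, 4-byte
	   FIFO); otherwise => Z85C30 (1-byte buffer). */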

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->open = scc_open;
		dev->stop = scc_close;
		dev->do_ioctl = scc_ioctl;
		dev->hard_start_xmit = scc_send_packet;
		dev->get_stats = scc_get_stats;
		dev->hard_header = ax25_hard_header;
		dev->rebuild_header = ax25_rebuild_header;
		dev->set_mac_address = scc_set_mac_address;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return -1;
}

/* Driver functions */

static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}


static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		     This bit should be ignored in DMA mode (according to the
		     documentation), but actually isn't. The receiver doesn't
		     work if it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		     a) If set, the ESCC behaves as if it had no TX FIFO
		        (Z85C30 compatibility).
		     b) If cleared, DMA requests may follow each other very
		        quickly, filling up the TX FIFO.
		        Advantage: TX works even in case of high bus latency.
		        Disadvantage: Edge-triggered DMA request circuitry may
		        miss a request. No more data is delivered, resulting
		        in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE
		   cleared. The PackeTwin doesn't. I don't know about the PI,
		   but let's assume it behaves like the PI2. */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required
		   by PackeTwin, not connected on the PI2); set DPLL source to
		   BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}


static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}


static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_priv *priv = dev->priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user
		    (ifr->ifr_data, &priv->param,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user
		    (&priv->param, ifr->ifr_data,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}
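
/*
 * Userspace access sketch (not part of the original source): a configuration
 * tool such as dmascc_cfg is expected to read and write struct scc_param
 * through the private ioctls above.  The fragment below is only a minimal
 * illustration, assuming the tool carries its own copy of struct scc_param
 * and of the SIOCGSCCPARAM/SIOCSSCCPARAM numbers; the interface name
 * "dmascc0" and the txdelay value are example choices.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/sockios.h>
 *
 *	#define SIOCGSCCPARAM SIOCDEVPRIVATE
 *	#define SIOCSSCCPARAM (SIOCDEVPRIVATE + 1)
 *
 *	int main(void)
 *	{
 *		struct scc_param param;		// definition copied from the driver
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *) &param;
 *
 *		if (ioctl(fd, SIOCGSCCPARAM, &ifr) == 0) {	// read current parameters
 *			param.txdelay = 2560;			// e.g. 100 ms at 25600 Hz
 *			ioctl(fd, SIOCSSCCPARAM, &ifr);		// write back; needs CAP_NET_ADMIN
 *								// and the interface must be down
 *		}
 *		return 0;
 *	}
 */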


static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
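	/* Byte 0 of the skb is the KISS command byte prepended by the AX.25
	   stack (compare data[0] = 0 in rx_bh()); it is not transmitted,
	   hence the offset of one and the length of skb->len - 1 below. */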
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return 0;
}


static struct net_device_stats *scc_get_stats(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;

	return &priv->stats;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}


static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
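		/* The first n bytes are fed to the SCC by the CPU and the DMA
		   transfer below starts at offset n: the Z85230 ESCC has a
		   4-byte TX FIFO, so three bytes are preloaded; the other
		   chips take a single byte. */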
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}


static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}


static void start_timer(struct scc_priv *priv, int t, int r15)
{
	unsigned long flags;

	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		save_flags(flags);
		cli();
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
		restore_flags(flags);
	}
}


static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}
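
/*
 * Explanatory note (not part of the original source): tm_isr() uses random()
 * for p-persistence, waiting random() / persist * slottime timer ticks before
 * keying up.  For example, with the assumed settings persist = 64 and
 * slottime = 256 (10 ms at TMR_0_HZ = 25600), the wait is 0, 1, 2 or 3 slot
 * times; with the driver default persist = 256 the wait is always zero.
 */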

static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		write_scc(&info->priv[0], R0, RES_H_IUS);
		i++;
	}
	if (i < 0) {
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	}
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}


static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under
	   service is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}


static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not
		   necessary. Same algorithm for SCC and ESCC.
		   See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->
							    rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}


static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->stats.rx_length_errors++;
			else
				priv->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->stats.rx_errors++;
				priv->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head +
					     1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->stats.rx_errors++;
					priv->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}


static void rx_bh(struct work_struct *ugli_api)
{
	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->stats.rx_dropped++;
		} else {
			/* Fill buffer */
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->last_rx = jiffies;
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}


static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}


static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->stats.tx_errors++;
			priv->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}


static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}