/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF	2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF	6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE	1576	/* BUF_SIZE >= mtu + hard_header_len */
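/* With dev_setup()'s defaults (mtu 1500, hard_header_len set to
   AX25_MAX_HEADER_LEN), 1576 bytes leave 76 bytes of headroom above the
   MTU for the AX.25 header. */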


/* Cards supported */

#define HW_PI		{ "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			  0, 8, 1843200, 3686400 }
#define HW_PI2		{ "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			  0, 8, 3686400, 7372800 }
#define HW_TWIN		{ "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			  0, 4, 6144000, 6144000 }
#define HW_S5		{ "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			  0, 8, 4915200, 9830400 }

#define HARDWARE	{ HW_PI, HW_PI2, HW_TWIN, HW_S5 }
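/* The initializers above fill struct scc_hardware (defined below) in
   declaration order: name, io_region, io_delta, io_size, num_devs,
   scc_offset, tmr_offset, tmr_hz, pclk_hz. */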

#define TMR_0_HZ	25600	/* Frequency of timer 0 */

#define TYPE_PI		0
#define TYPE_PI2	1
#define TYPE_TWIN	2
#define TYPE_S5		3
#define NUM_TYPES	4

#define MAX_NUM_DEVS	32


/* SCC chips supported */

#define Z8530		0
#define Z85C30		1
#define Z85230		2

#define CHIPNAMES	{ "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD	0x00
#define SCCB_DATA	0x01
#define SCCA_CMD	0x02
#define SCCA_DATA	0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0	0x00
#define TMR_CNT1	0x01
#define TMR_CNT2	0x02
#define TMR_CTRL	0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK	0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG	0x08
#define TWIN_CLR_TMR1	0x09
#define TWIN_CLR_TMR2	0x0a
#define TWIN_SPARE_1	0x0b
#define TWIN_DMA_CFG	0x08
#define TWIN_SERIAL_CFG	0x09
#define TWIN_DMA_CLR_FF	0x0a
#define TWIN_SPARE_2	0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK	0x01
#define TWIN_TMR1_MSK	0x02
#define TWIN_TMR2_MSK	0x04
#define TWIN_INT_MSK	0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON	0x01
#define TWIN_DTRB_ON	0x02
#define TWIN_EXTCLKA	0x04
#define TWIN_EXTCLKB	0x08
#define TWIN_LOOPA_ON	0x10
#define TWIN_LOOPB_ON	0x20
#define TWIN_EI		0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1	0x08
#define TWIN_DMA_HDX_R1	0x0a
#define TWIN_DMA_HDX_T3	0x14
#define TWIN_DMA_HDX_R3	0x16
#define TWIN_DMA_FDX_T3R1	0x1b
#define TWIN_DMA_FDX_T1R3	0x1d


/* Status values */

#define IDLE		0
#define TX_HEAD		1
#define TX_DATA		2
#define TX_PAUSE	3
#define TX_TAIL		4
#define RTS_OFF		5
#define WAIT		6
#define DCD_ON		7
#define RX_ON		8
#define DCD_OFF		9


/* Ioctls */

#define SIOCGSCCPARAM	SIOCDEVPRIVATE
#define SIOCSSCCPARAM	(SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
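/* Unit example: the [1/TMR_0_HZ] fields count ticks of timer 0, which runs
   at TMR_0_HZ = 25600 Hz, so e.g. 256 ticks = 256 / 25600 s = 10 ms;
   txtimeout, in contrast, is measured in jiffies (1/HZ). */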

struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;

	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */

static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_array(io, int, NULL, 0);
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs
				    && hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

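		/* Probe rationale: timer 0 divides the board clock down to
		   TMR_0_HZ, and timer 1 was loaded with TMR_0_HZ / HZ * 10
		   of those ticks, i.e. it should take roughly 10 jiffies to
		   count down.  The loop below watches timer 1; a board is
		   accepted further down only if the measured delay lands in
		   the 9..11 jiffies window. */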
		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0
					    || t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_do_ioctl = scc_ioctl,
};
449
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450static int __init setup_adapter(int card_base, int type, int n)
451{
452 int i, irq, chip;
453 struct scc_info *info;
454 struct net_device *dev;
455 struct scc_priv *priv;
456 unsigned long time;
457 unsigned int irqs;
458 int tmr_base = card_base + hw[type].tmr_offset;
459 int scc_base = card_base + hw[type].scc_offset;
460 char *chipnames[] = CHIPNAMES;
461
Yoann Padioleaudd00cc42007-07-19 01:49:03 -0700462 /* Initialize what is necessary for write_scc and write_scc_data */
463 info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464 if (!info) {
465 printk(KERN_ERR "dmascc: "
466 "could not allocate memory for %s at %#3x\n",
467 hw[type].name, card_base);
468 goto out;
469 }
470
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472 info->dev[0] = alloc_netdev(0, "", dev_setup);
473 if (!info->dev[0]) {
474 printk(KERN_ERR "dmascc: "
475 "could not allocate memory for %s at %#3x\n",
476 hw[type].name, card_base);
477 goto out1;
478 }
479
480 info->dev[1] = alloc_netdev(0, "", dev_setup);
481 if (!info->dev[1]) {
482 printk(KERN_ERR "dmascc: "
483 "could not allocate memory for %s at %#3x\n",
484 hw[type].name, card_base);
485 goto out2;
486 }
487 spin_lock_init(&info->register_lock);
488
489 priv = &info->priv[0];
490 priv->type = type;
491 priv->card_base = card_base;
492 priv->scc_cmd = scc_base + SCCA_CMD;
493 priv->scc_data = scc_base + SCCA_DATA;
494 priv->register_lock = &info->register_lock;
495
496 /* Reset SCC */
497 write_scc(priv, R9, FHWRES | MIE | NV);
498
499 /* Determine type of chip by enabling SDLC/HDLC enhancements */
500 write_scc(priv, R15, SHDLCE);
501 if (!read_scc(priv, R15)) {
502 /* WR7' not present. This is an ordinary Z8530 SCC. */
503 chip = Z8530;
504 } else {
505 /* Put one character in TX FIFO */
506 write_scc_data(priv, 0, 0);
507 if (read_scc(priv, R0) & Tx_BUF_EMP) {
508 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
509 chip = Z85230;
510 } else {
511 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
512 chip = Z85C30;
513 }
514 }
515 write_scc(priv, R15, 0);
516
	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
		dev->set_mac_address = scc_set_mac_address;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return -1;
}


/* Driver functions */

static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}


static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}


static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}


static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_priv *priv = dev->ml_priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user
		    (ifr->ifr_data, &priv->param,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user
		    (&priv->param, ifr->ifr_data,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}
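
/*
 * Rough userspace sketch of how SIOCGSCCPARAM/SIOCSSCCPARAM can be driven
 * (SIOCGSCCPARAM is SIOCDEVPRIVATE, SIOCSSCCPARAM is SIOCDEVPRIVATE + 1, as
 * defined above; the userspace struct layout must match struct scc_param,
 * and the interface name and parameter values below are only examples):
 *
 *	struct ifreq ifr;
 *	struct scc_param param;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
 *	ifr.ifr_data = (caddr_t) &param;
 *	ioctl(fd, SIOCGSCCPARAM, &ifr);     read current parameters
 *	param.txdelay = 640;                i.e. 640 / 25600 s = 25 ms
 *	ioctl(fd, SIOCSSCCPARAM, &ifr);     write back (needs CAP_NET_ADMIN,
 *	                                    interface must be down)
 */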


static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
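	/* (The leading byte of the skb - the KISS-style command byte supplied
	   by the AX.25 stack - is skipped here and not sent over the air;
	   rx_bh() below prepends a zero byte again on receive.) */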
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return 0;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}


static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
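		/* n bytes are written to the TX FIFO by hand further down
		   (three on the Z85230 with its 4-byte FIFO, one otherwise)
		   before the rest of the frame is handed to the DMA
		   controller, hence the +n / -n offsets below. */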
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}


static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}


static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}


static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}


static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}
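
/* random() returns a value in 0..255; tm_isr() below uses
   random() / param.persist * param.slottime as the channel-access back-off,
   a p-persistence style scheme (with the default persist of 256 the integer
   division yields 0, i.e. transmit immediately once DCD allows it). */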

static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		write_scc(&info->priv[0], R0, RES_H_IUS);
		i++;
	}
	if (i < 0) {
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	}
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}


static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}


static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}


static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->dev->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->dev->stats.rx_length_errors++;
			else
				priv->dev->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head + 1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->dev->stats.rx_errors++;
					priv->dev->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}

1254
Al Viro7a87b6c2006-12-06 18:51:40 +00001255static void rx_bh(struct work_struct *ugli_api)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256{
Al Viro7a87b6c2006-12-06 18:51:40 +00001257 struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 int i = priv->rx_tail;
1259 int cb;
1260 unsigned long flags;
1261 struct sk_buff *skb;
1262 unsigned char *data;
1263
1264 spin_lock_irqsave(&priv->ring_lock, flags);
1265 while (priv->rx_count) {
1266 spin_unlock_irqrestore(&priv->ring_lock, flags);
1267 cb = priv->rx_len[i];
1268 /* Allocate buffer */
1269 skb = dev_alloc_skb(cb + 1);
1270 if (skb == NULL) {
1271 /* Drop packet */
Stephen Hemminger13c05822009-01-09 13:01:33 +00001272 priv->dev->stats.rx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 } else {
1274 /* Fill buffer */
1275 data = skb_put(skb, cb + 1);
1276 data[0] = 0;
1277 memcpy(&data[1], priv->rx_buf[i], cb);
Arnaldo Carvalho de Melo56cb5152005-04-24 18:53:06 -07001278 skb->protocol = ax25_type_trans(skb, priv->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 netif_rx(skb);
Stephen Hemminger13c05822009-01-09 13:01:33 +00001280 priv->dev->stats.rx_packets++;
1281 priv->dev->stats.rx_bytes += cb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 }
1283 spin_lock_irqsave(&priv->ring_lock, flags);
1284 /* Move tail */
1285 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1286 priv->rx_count--;
1287 }
1288 spin_unlock_irqrestore(&priv->ring_lock, flags);
1289}
1290
1291
static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}


static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->dev->stats.tx_errors++;
			priv->dev->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->dev->stats.tx_packets++;
			priv->dev->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}


static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->dev->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}