1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8 * (c) Copyright 2000, 2001 Red Hat Inc
9 *
10 * Development of this driver was funded by Equiinet Ltd
11 * http://www.equiinet.com
12 *
13 * ChangeLog:
14 *
15 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16 * unification of all the Z85x30 asynchronous drivers for real.
17 *
18 * DMA now uses get_free_page as kmalloc buffers may span a 64K
19 * boundary.
20 *
21 * Modified for SMP safety and SMP locking by Alan Cox
22 * <alan@lxorguk.ukuu.org.uk>
23 *
24 * Performance
25 *
26 * Z85230:
27 * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
28 * X.25 is not unrealistic on all machines. DMA mode can in theory
29 * handle T1/E1 quite nicely. In practice the limit seems to be about
30 * 512Kbit->1Mbit depending on motherboard.
31 *
32 * Z85C30:
33 * 64K will take DMA, 9600 baud X.25 should be ok.
34 *
35 * Z8530:
36 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
37 */
38
39#include <linux/module.h>
40#include <linux/kernel.h>
41#include <linux/mm.h>
42#include <linux/net.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/if_arp.h>
46#include <linux/delay.h>
47#include <linux/hdlc.h>
48#include <linux/ioport.h>
49#include <linux/init.h>
50#include <asm/dma.h>
51#include <asm/io.h>
52#define RT_LOCK
53#define RT_UNLOCK
54#include <linux/spinlock.h>
55
56#include "z85230.h"
57
58
59/**
60 * z8530_read_port - Architecture specific interface function
61 * @p: port to read
62 *
63 * Provided port access methods. The Comtrol SV11 requires no delays
64 * between accesses and uses PC I/O. Some drivers may need a 5uS delay
65 *
66 * In the longer term this should become an architecture specific
67 * section so that this can become a generic driver interface for all
68 * platforms. For now we only handle PC I/O ports with or without the
69 * dread 5uS sanity delay.
70 *
71 * The caller must hold sufficient locks to avoid violating the horrible
72 * 5uS delay rule.
73 */
74
75static inline int z8530_read_port(unsigned long p)
76{
77 u8 r=inb(Z8530_PORT_OF(p));
78 if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
79 udelay(5);
80 return r;
81}
82
83/**
84 * z8530_write_port - Architecture specific interface function
85 * @p: port to write
86 * @d: value to write
87 *
88 * Write a value to a port with delays if need be. Note that the
89 * caller must hold locks to avoid read/writes from other contexts
90 * violating the 5uS rule
91 *
92 * In the longer term this should become an architecture specific
93 * section so that this can become a generic driver interface for all
94 * platforms. For now we only handle PC I/O ports with or without the
95 * dread 5uS sanity delay.
96 */
97
98
99static inline void z8530_write_port(unsigned long p, u8 d)
100{
101 outb(d,Z8530_PORT_OF(p));
102 if(p&Z8530_PORT_SLEEP)
103 udelay(5);
104}
105
106
107
108static void z8530_rx_done(struct z8530_channel *c);
109static void z8530_tx_done(struct z8530_channel *c);
110
111
112/**
113 * read_zsreg - Read a register from a Z85230
114 * @c: Z8530 channel to read from (2 per chip)
115 * @reg: Register to read
116 * FIXME: Use a spinlock.
117 *
118 * Most of the Z8530 registers are indexed off the control registers.
119 * A read is done by writing to the control register and reading the
120 * register back. The caller must hold the lock
121 */
122
123static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
124{
125 if(reg)
126 z8530_write_port(c->ctrlio, reg);
127 return z8530_read_port(c->ctrlio);
128}
129
130/**
131 * read_zsdata - Read the data port of a Z8530 channel
132 * @c: The Z8530 channel to read the data port from
133 *
134 * The data port provides fast access to some things. We still
135 * have all the 5uS delays to worry about.
136 */
137
138static inline u8 read_zsdata(struct z8530_channel *c)
139{
140 u8 r;
141 r=z8530_read_port(c->dataio);
142 return r;
143}
144
145/**
146 * write_zsreg - Write to a Z8530 channel register
147 * @c: The Z8530 channel
148 * @reg: Register number
149 * @val: Value to write
150 *
151 * Write a value to an indexed register. The caller must hold the lock
152 * to honour the irritating delay rules. We know about register 0
153 * being fast to access.
154 *
155 * Assumes c->lock is held.
156 */
157static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
158{
159 if(reg)
160 z8530_write_port(c->ctrlio, reg);
161 z8530_write_port(c->ctrlio, val);
162
163}
164
165/**
166 * write_zsctrl - Write to a Z8530 control register
167 * @c: The Z8530 channel
168 * @val: Value to write
169 *
170 * Write directly to the control register on the Z8530
171 */
172
173static inline void write_zsctrl(struct z8530_channel *c, u8 val)
174{
175 z8530_write_port(c->ctrlio, val);
176}
177
178/**
179 * write_zsdata - Write to a Z8530 data register
180 * @c: The Z8530 channel
181 * @val: Value to write
182 *
183 * Write directly to the data register on the Z8530
184 */
185
186
187static inline void write_zsdata(struct z8530_channel *c, u8 val)
188{
189 z8530_write_port(c->dataio, val);
190}
191
192/*
193 * Register loading parameters for a dead port
194 */
195
196u8 z8530_dead_port[]=
197{
198 255
199};
200
201EXPORT_SYMBOL(z8530_dead_port);
202
203/*
204 * Register loading parameters for currently supported circuit types
205 */
206
207
208/*
209 * Data clocked by telco end. This is the correct data for the UK
210 * "kilostream" service, and most other similar services.
211 */
212
213u8 z8530_hdlc_kilostream[]=
214{
215 4, SYNC_ENAB|SDLC|X1CLK,
216 2, 0, /* No vector */
217 1, 0,
218 3, ENT_HM|RxCRC_ENAB|Rx8,
219 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
220 9, 0, /* Disable interrupts */
221 6, 0xFF,
222 7, FLAG,
223 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
224 11, TCTRxCP,
225 14, DISDPLL,
226 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
227 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
228 9, NV|MIE|NORESET,
229 255
230};
231
232EXPORT_SYMBOL(z8530_hdlc_kilostream);
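/*
 * A minimal usage sketch (hypothetical device name): a port driver hands
 * one of these tables to z8530_channel_load(), which walks the
 * register,value pairs until it hits the 255 terminator.
 *
 *	static int mycard_load_chanA(struct z8530_dev *mydev)
 *	{
 *		return z8530_channel_load(&mydev->chanA, z8530_hdlc_kilostream);
 *	}
 */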
233
234/*
235 * As above but for enhanced chips.
236 */
237
238u8 z8530_hdlc_kilostream_85230[]=
239{
240 4, SYNC_ENAB|SDLC|X1CLK,
241 2, 0, /* No vector */
242 1, 0,
243 3, ENT_HM|RxCRC_ENAB|Rx8,
244 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
245 9, 0, /* Disable interrupts */
246 6, 0xFF,
247 7, FLAG,
248 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
249 11, TCTRxCP,
250 14, DISDPLL,
251 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
252 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
253 9, NV|MIE|NORESET,
254 23, 3, /* Extended mode AUTO TX and EOM*/
255
256 255
257};
258
259EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
260
261/**
262 * z8530_flush_fifo - Flush on chip RX FIFO
263 * @c: Channel to flush
264 *
265 * Flush the receive FIFO. There is no specific option for this, we
266 * blindly read bytes and discard them. Reading when there is no data
267 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
268 *
269 * All locking is handled for the caller. On return data may still be
270 * present if it arrived during the flush.
271 */
272
273static void z8530_flush_fifo(struct z8530_channel *c)
274{
275 read_zsreg(c, R1);
276 read_zsreg(c, R1);
277 read_zsreg(c, R1);
278 read_zsreg(c, R1);
279 if(c->dev->type==Z85230)
280 {
281 read_zsreg(c, R1);
282 read_zsreg(c, R1);
283 read_zsreg(c, R1);
284 read_zsreg(c, R1);
285 }
286}
287
288/**
289 * z8530_rtsdtr - Control the outgoing DTR/RTS line
290 * @c: The Z8530 channel to control;
291 * @set: 1 to set, 0 to clear
292 *
293 * Sets or clears DTR/RTS on the requested line. All locking is handled
294 * by the caller. For now we assume all boards use the actual RTS/DTR
295 * on the chip. Apparently one or two don't. We'll scream about them
296 * later.
297 */
298
299static void z8530_rtsdtr(struct z8530_channel *c, int set)
300{
301 if (set)
302 c->regs[5] |= (RTS | DTR);
303 else
304 c->regs[5] &= ~(RTS | DTR);
305 write_zsreg(c, R5, c->regs[5]);
306}
307
308/**
309 * z8530_rx - Handle a PIO receive event
310 * @c: Z8530 channel to process
311 *
312 * Receive handler for receiving in PIO mode. This is much like the
313 * async one but not quite the same or as complex
314 *
315 * Note: It's intended that this handler can easily be separated from
316 * the main code to run realtime. That'll be needed for some machines
317 * (eg to ever clock 64kbits on a sparc ;)).
318 *
319 * The RT_LOCK macros don't do anything now. Keep the code covered
320 * by them as short as possible in all circumstances - clocks cost
321 * baud. The interrupt handler is assumed to be atomic w.r.t. to
322 * other code - this is true in the RT case too.
323 *
324 * We only cover the sync cases for this. If you want 2Mbit async
325 * do it yourself but consider medical assistance first. This non DMA
326 * synchronous mode is portable code. The DMA mode assumes PCI like
327 * ISA DMA
328 *
329 * Called with the device lock held
330 */
331
332static void z8530_rx(struct z8530_channel *c)
333{
334 u8 ch,stat;
335
336 while(1)
337 {
338 /* FIFO empty ? */
339 if(!(read_zsreg(c, R0)&1))
340 break;
341 ch=read_zsdata(c);
342 stat=read_zsreg(c, R1);
343
344 /*
345 * Overrun ?
346 */
347 if(c->count < c->max)
348 {
349 *c->dptr++=ch;
350 c->count++;
351 }
352
353 if(stat&END_FR)
354 {
355
356 /*
357 * Error ?
358 */
359 if(stat&(Rx_OVR|CRC_ERR))
360 {
361 /* Rewind the buffer and return */
362 if(c->skb)
363 c->dptr=c->skb->data;
364 c->count=0;
365 if(stat&Rx_OVR)
366 {
367 printk(KERN_WARNING "%s: overrun\n", c->dev->name);
368 c->rx_overrun++;
369 }
370 if(stat&CRC_ERR)
371 {
372 c->rx_crc_err++;
373 /* printk("crc error\n"); */
374 }
375 /* Shove the frame upstream */
376 }
377 else
378 {
379 /*
380 * Drop the lock for RX processing, or
381 * there are deadlocks
382 */
383 z8530_rx_done(c);
384 write_zsctrl(c, RES_Rx_CRC);
385 }
386 }
387 }
388 /*
389 * Clear irq
390 */
391 write_zsctrl(c, ERR_RES);
392 write_zsctrl(c, RES_H_IUS);
393}
394
395
396/**
397 * z8530_tx - Handle a PIO transmit event
398 * @c: Z8530 channel to process
399 *
400 * Z8530 transmit interrupt handler for the PIO mode. The basic
401 * idea is to attempt to keep the FIFO fed. We fill as many bytes
402 * in as possible, its quite possible that we won't keep up with the
403 * data rate otherwise.
404 */
405
406static void z8530_tx(struct z8530_channel *c)
407{
408 while(c->txcount) {
409 /* FIFO full ? */
410 if(!(read_zsreg(c, R0)&4))
411 return;
412 c->txcount--;
413 /*
414 * Shovel out the byte
415 */
416 write_zsreg(c, R8, *c->tx_ptr++);
417 write_zsctrl(c, RES_H_IUS);
418 /* We are about to underflow */
419 if(c->txcount==0)
420 {
421 write_zsctrl(c, RES_EOM_L);
422 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
423 }
424 }
425
426
427 /*
428 * End of frame TX - fire another one
429 */
430
431 write_zsctrl(c, RES_Tx_P);
432
433 z8530_tx_done(c);
434 write_zsctrl(c, RES_H_IUS);
435}
436
437/**
438 * z8530_status - Handle a PIO status exception
439 * @chan: Z8530 channel to process
440 *
441 * A status event occurred in PIO synchronous mode. There are several
442 * reasons the chip will bother us here. A transmit underrun means we
443 * failed to feed the chip fast enough and just broke a packet. A DCD
444 * change is a line up or down.
445 */
446
447static void z8530_status(struct z8530_channel *chan)
448{
449 u8 status, altered;
450
451 status = read_zsreg(chan, R0);
452 altered = chan->status ^ status;
453
454 chan->status = status;
455
456 if (status & TxEOM) {
457/* printk("%s: Tx underrun.\n", chan->dev->name); */
458 chan->netdevice->stats.tx_fifo_errors++;
459 write_zsctrl(chan, ERR_RES);
460 z8530_tx_done(chan);
461 }
462
463 if (altered & chan->dcdcheck)
464 {
465 if (status & chan->dcdcheck) {
466 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
467 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
468 if (chan->netdevice)
469 netif_carrier_on(chan->netdevice);
470 } else {
471 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
472 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
473 z8530_flush_fifo(chan);
474 if (chan->netdevice)
475 netif_carrier_off(chan->netdevice);
476 }
477
478 }
479 write_zsctrl(chan, RES_EXT_INT);
480 write_zsctrl(chan, RES_H_IUS);
481}
482
483struct z8530_irqhandler z8530_sync =
484{
485 z8530_rx,
486 z8530_tx,
487 z8530_status
488};
489
490EXPORT_SYMBOL(z8530_sync);
491
492/**
493 * z8530_dma_rx - Handle a DMA RX event
494 * @chan: Channel to handle
495 *
496 * Non bus mastering DMA interfaces for the Z8x30 devices. This
497 * is really pretty PC specific. The DMA mode means that most receive
498 * events are handled by the DMA hardware. We get a kick here only if
499 * a frame ended.
500 */
501
502static void z8530_dma_rx(struct z8530_channel *chan)
503{
504 if(chan->rxdma_on)
505 {
506 /* Special condition check only */
507 u8 status;
508
509 read_zsreg(chan, R7);
510 read_zsreg(chan, R6);
511
512 status=read_zsreg(chan, R1);
513
514 if(status&END_FR)
515 {
516 z8530_rx_done(chan); /* Fire up the next one */
517 }
518 write_zsctrl(chan, ERR_RES);
519 write_zsctrl(chan, RES_H_IUS);
520 }
521 else
522 {
523 /* DMA is off right now, drain the slow way */
524 z8530_rx(chan);
525 }
526}
527
528/**
529 * z8530_dma_tx - Handle a DMA TX event
530 * @chan: The Z8530 channel to handle
531 *
532 * We have received an interrupt while doing DMA transmissions. It
533 * shouldn't happen. Scream loudly if it does.
534 */
535
536static void z8530_dma_tx(struct z8530_channel *chan)
537{
538 if(!chan->dma_tx)
539 {
540 printk(KERN_WARNING "Hey who turned the DMA off?\n");
541 z8530_tx(chan);
542 return;
543 }
544 /* This shouldn't occur in DMA mode */
545 printk(KERN_ERR "DMA tx - bogus event!\n");
546 z8530_tx(chan);
547}
548
549/**
550 * z8530_dma_status - Handle a DMA status exception
551 * @chan: Z8530 channel to process
552 *
553 * A status event occurred on the Z8530. We receive these for two reasons
554 * when in DMA mode. Firstly if we finished a packet transfer we get one
555 * and kick the next packet out. Secondly we may see a DCD change.
556 *
557 */
558
559static void z8530_dma_status(struct z8530_channel *chan)
560{
561 u8 status, altered;
562
563 status=read_zsreg(chan, R0);
564 altered=chan->status^status;
565
566 chan->status=status;
567
568
569 if(chan->dma_tx)
570 {
571 if(status&TxEOM)
572 {
573 unsigned long flags;
574
575 flags=claim_dma_lock();
576 disable_dma(chan->txdma);
577 clear_dma_ff(chan->txdma);
578 chan->txdma_on=0;
579 release_dma_lock(flags);
580 z8530_tx_done(chan);
581 }
582 }
583
584 if (altered & chan->dcdcheck)
585 {
586 if (status & chan->dcdcheck) {
587 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
588 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
589 if (chan->netdevice)
590 netif_carrier_on(chan->netdevice);
591 } else {
592 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
593 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
594 z8530_flush_fifo(chan);
595 if (chan->netdevice)
596 netif_carrier_off(chan->netdevice);
597 }
598 }
599
600 write_zsctrl(chan, RES_EXT_INT);
601 write_zsctrl(chan, RES_H_IUS);
602}
603
604struct z8530_irqhandler z8530_dma_sync=
605{
606 z8530_dma_rx,
607 z8530_dma_tx,
608 z8530_dma_status
609};
610
611EXPORT_SYMBOL(z8530_dma_sync);
612
613struct z8530_irqhandler z8530_txdma_sync=
614{
615 z8530_rx,
616 z8530_dma_tx,
617 z8530_dma_status
618};
619
620EXPORT_SYMBOL(z8530_txdma_sync);
621
622/**
623 * z8530_rx_clear - Handle RX events from a stopped chip
624 * @c: Z8530 channel to shut up
625 *
626 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
627 * For machines with PCI Z85x30 cards, or level triggered interrupts
628 * (eg the MacII) we must clear the interrupt cause or die.
629 */
630
631
632static void z8530_rx_clear(struct z8530_channel *c)
633{
634 /*
635 * Data and status bytes
636 */
637 u8 stat;
638
639 read_zsdata(c);
640 stat=read_zsreg(c, R1);
641
642 if(stat&END_FR)
643 write_zsctrl(c, RES_Rx_CRC);
644 /*
645 * Clear irq
646 */
647 write_zsctrl(c, ERR_RES);
648 write_zsctrl(c, RES_H_IUS);
649}
650
651/**
652 * z8530_tx_clear - Handle TX events from a stopped chip
653 * @c: Z8530 channel to shut up
654 *
655 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
656 * For machines with PCI Z85x30 cards, or level triggered interrupts
657 * (eg the MacII) we must clear the interrupt cause or die.
658 */
659
660static void z8530_tx_clear(struct z8530_channel *c)
661{
662 write_zsctrl(c, RES_Tx_P);
663 write_zsctrl(c, RES_H_IUS);
664}
665
666/**
667 * z8530_status_clear - Handle status events from a stopped chip
668 * @chan: Z8530 channel to shut up
669 *
670 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
671 * For machines with PCI Z85x30 cards, or level triggered interrupts
672 * (eg the MacII) we must clear the interrupt cause or die.
673 */
674
675static void z8530_status_clear(struct z8530_channel *chan)
676{
677 u8 status=read_zsreg(chan, R0);
678 if(status&TxEOM)
679 write_zsctrl(chan, ERR_RES);
680 write_zsctrl(chan, RES_EXT_INT);
681 write_zsctrl(chan, RES_H_IUS);
682}
683
684struct z8530_irqhandler z8530_nop=
685{
686 z8530_rx_clear,
687 z8530_tx_clear,
688 z8530_status_clear
689};
690
691
692EXPORT_SYMBOL(z8530_nop);
693
694/**
695 * z8530_interrupt - Handle an interrupt from a Z8530
696 * @irq: Interrupt number
697 * @dev_id: The Z8530 device that is interrupting.
698 *
699 * A Z85[2]30 device has stuck its hand in the air for attention.
700 * We scan both the channels on the chip for events and then call
701 * the channel specific call backs for each channel that has events.
702 * We have to use callback functions because the two channels can be
703 * in different modes.
704 *
705 * Locking is done for the handlers. Note that locking is done
706 * at the chip level (the 5uS delay issue is per chip not per
707 * channel). c->lock for both channels points to dev->lock
708 */
709
710irqreturn_t z8530_interrupt(int irq, void *dev_id)
711{
712 struct z8530_dev *dev=dev_id;
713 u8 intr;
714 static volatile int locker=0;
715 int work=0;
716 struct z8530_irqhandler *irqs;
717
718 if(locker)
719 {
720 printk(KERN_ERR "IRQ re-enter\n");
721 return IRQ_NONE;
722 }
723 locker=1;
724
725 spin_lock(&dev->lock);
726
727 while(++work<5000)
728 {
729
730 intr = read_zsreg(&dev->chanA, R3);
731 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
732 break;
733
734 /* This holds the IRQ status. On the 8530 you must read it from chan
735 A even though it applies to the whole chip */
736
737 /* Now walk the chip and see what it is wanting - it may be
738 an IRQ for someone else remember */
739
740 irqs=dev->chanA.irqs;
741
742 if(intr & (CHARxIP|CHATxIP|CHAEXT))
743 {
744 if(intr&CHARxIP)
745 irqs->rx(&dev->chanA);
746 if(intr&CHATxIP)
747 irqs->tx(&dev->chanA);
748 if(intr&CHAEXT)
749 irqs->status(&dev->chanA);
750 }
751
752 irqs=dev->chanB.irqs;
753
754 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
755 {
756 if(intr&CHBRxIP)
757 irqs->rx(&dev->chanB);
758 if(intr&CHBTxIP)
759 irqs->tx(&dev->chanB);
760 if(intr&CHBEXT)
761 irqs->status(&dev->chanB);
762 }
763 }
764 spin_unlock(&dev->lock);
765 if(work==5000)
766 printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
767 /* Ok all done */
768 locker=0;
769 return IRQ_HANDLED;
770}
771
772EXPORT_SYMBOL(z8530_interrupt);
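/*
 * Registration sketch (assumptions flagged): a board driver installs this
 * handler itself and passes its struct z8530_dev as dev_id so both
 * channels are serviced from the one vector. IRQF_SHARED is shown only
 * because many cards share the line; the name string is hypothetical.
 *
 *	if (request_irq(dev->irq, z8530_interrupt, IRQF_SHARED,
 *			"mycard", dev))
 *		return -EBUSY;
 */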
773
774static char reg_init[16]=
775{
776 0,0,0,0,
777 0,0,0,0,
778 0,0,0,0,
779 0x55,0,0,0
780};
781
782
783/**
784 * z8530_sync_open - Open a Z8530 channel for PIO
785 * @dev: The network interface we are using
786 * @c: The Z8530 channel to open in synchronous PIO mode
787 *
788 * Switch a Z8530 into synchronous mode without DMA assist. We
789 * raise the RTS/DTR and commence network operation.
790 */
791
792int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
793{
794 unsigned long flags;
795
796 spin_lock_irqsave(c->lock, flags);
797
798 c->sync = 1;
799 c->mtu = dev->mtu+64;
800 c->count = 0;
801 c->skb = NULL;
802 c->skb2 = NULL;
803 c->irqs = &z8530_sync;
804
805 /* This loads the double buffer up */
806 z8530_rx_done(c); /* Load the frame ring */
807 z8530_rx_done(c); /* Load the backup frame */
808 z8530_rtsdtr(c,1);
809 c->dma_tx = 0;
810 c->regs[R1]|=TxINT_ENAB;
811 write_zsreg(c, R1, c->regs[R1]);
812 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
813
814 spin_unlock_irqrestore(c->lock, flags);
815 return 0;
816}
817
818
819EXPORT_SYMBOL(z8530_sync_open);
820
821/**
822 * z8530_sync_close - Close a PIO Z8530 channel
823 * @dev: Network device to close
824 * @c: Z8530 channel to disassociate and move to idle
825 *
826 * Close down a Z8530 interface and switch its interrupt handlers
827 * to discard future events.
828 */
829
830int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
831{
832 u8 chk;
833 unsigned long flags;
834
835 spin_lock_irqsave(c->lock, flags);
836 c->irqs = &z8530_nop;
837 c->max = 0;
838 c->sync = 0;
839
840 chk=read_zsreg(c,R0);
841 write_zsreg(c, R3, c->regs[R3]);
842 z8530_rtsdtr(c,0);
843
844 spin_unlock_irqrestore(c->lock, flags);
845 return 0;
846}
847
848EXPORT_SYMBOL(z8530_sync_close);
849
850/**
851 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
852 * @dev: The network device to attach
853 * @c: The Z8530 channel to configure in sync DMA mode.
854 *
855 * Set up a Z85x30 device for synchronous DMA in both directions. Two
856 * ISA DMA channels must be available for this to work. We assume ISA
857 * DMA driven I/O and PC limits on access.
858 */
859
860int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
861{
862 unsigned long cflags, dflags;
863
864 c->sync = 1;
865 c->mtu = dev->mtu+64;
866 c->count = 0;
867 c->skb = NULL;
868 c->skb2 = NULL;
869 /*
870 * Load the DMA interfaces up
871 */
872 c->rxdma_on = 0;
873 c->txdma_on = 0;
874
875 /*
876 * Allocate the DMA flip buffers. Limit by page size.
877 * Everyone runs 1500 mtu or less on wan links so this
878 * should be fine.
879 */
880
881 if(c->mtu > PAGE_SIZE/2)
882 return -EMSGSIZE;
883
884 c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
885 if(c->rx_buf[0]==NULL)
886 return -ENOBUFS;
887 c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
888
889 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
890 if(c->tx_dma_buf[0]==NULL)
891 {
892 free_page((unsigned long)c->rx_buf[0]);
893 c->rx_buf[0]=NULL;
894 return -ENOBUFS;
895 }
896 c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
897
898 c->tx_dma_used=0;
899 c->dma_tx = 1;
900 c->dma_num=0;
901 c->dma_ready=1;
902
903 /*
904 * Enable DMA control mode
905 */
906
907 spin_lock_irqsave(c->lock, cflags);
908
909 /*
910 * TX DMA via DIR/REQ
911 */
912
913 c->regs[R14]|= DTRREQ;
914 write_zsreg(c, R14, c->regs[R14]);
915
916 c->regs[R1]&= ~TxINT_ENAB;
917 write_zsreg(c, R1, c->regs[R1]);
918
919 /*
920 * RX DMA via W/Req
921 */
922
923 c->regs[R1]|= WT_FN_RDYFN;
924 c->regs[R1]|= WT_RDY_RT;
925 c->regs[R1]|= INT_ERR_Rx;
926 c->regs[R1]&= ~TxINT_ENAB;
927 write_zsreg(c, R1, c->regs[R1]);
928 c->regs[R1]|= WT_RDY_ENAB;
929 write_zsreg(c, R1, c->regs[R1]);
930
931 /*
932 * DMA interrupts
933 */
934
935 /*
936 * Set up the DMA configuration
937 */
938
939 dflags=claim_dma_lock();
940
941 disable_dma(c->rxdma);
942 clear_dma_ff(c->rxdma);
943 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
944 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
945 set_dma_count(c->rxdma, c->mtu);
946 enable_dma(c->rxdma);
947
948 disable_dma(c->txdma);
949 clear_dma_ff(c->txdma);
950 set_dma_mode(c->txdma, DMA_MODE_WRITE);
951 disable_dma(c->txdma);
952
953 release_dma_lock(dflags);
954
955 /*
956 * Select the DMA interrupt handlers
957 */
958
959 c->rxdma_on = 1;
960 c->txdma_on = 1;
961 c->tx_dma_used = 1;
962
963 c->irqs = &z8530_dma_sync;
964 z8530_rtsdtr(c,1);
965 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
966
967 spin_unlock_irqrestore(c->lock, cflags);
968
969 return 0;
970}
971
972EXPORT_SYMBOL(z8530_sync_dma_open);
973
974/**
975 * z8530_sync_dma_close - Close down DMA I/O
976 * @dev: Network device to detach
977 * @c: Z8530 channel to move into discard mode
978 *
979 * Shut down a DMA mode synchronous interface. Halt the DMA, and
980 * free the buffers.
981 */
982
983int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
984{
985 u8 chk;
986 unsigned long flags;
987
988 c->irqs = &z8530_nop;
989 c->max = 0;
990 c->sync = 0;
991
992 /*
993 * Disable the PC DMA channels
994 */
995
996 flags=claim_dma_lock();
997 disable_dma(c->rxdma);
998 clear_dma_ff(c->rxdma);
999
1000 c->rxdma_on = 0;
1001
1002 disable_dma(c->txdma);
1003 clear_dma_ff(c->txdma);
1004 release_dma_lock(flags);
1005
1006 c->txdma_on = 0;
1007 c->tx_dma_used = 0;
1008
1009 spin_lock_irqsave(c->lock, flags);
1010
1011 /*
1012 * Disable DMA control mode
1013 */
1014
1015 c->regs[R1]&= ~WT_RDY_ENAB;
1016 write_zsreg(c, R1, c->regs[R1]);
1017 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1018 c->regs[R1]|= INT_ALL_Rx;
1019 write_zsreg(c, R1, c->regs[R1]);
1020 c->regs[R14]&= ~DTRREQ;
1021 write_zsreg(c, R14, c->regs[R14]);
1022
1023 if(c->rx_buf[0])
1024 {
1025 free_page((unsigned long)c->rx_buf[0]);
1026 c->rx_buf[0]=NULL;
1027 }
1028 if(c->tx_dma_buf[0])
1029 {
1030 free_page((unsigned long)c->tx_dma_buf[0]);
1031 c->tx_dma_buf[0]=NULL;
1032 }
1033 chk=read_zsreg(c,R0);
1034 write_zsreg(c, R3, c->regs[R3]);
1035 z8530_rtsdtr(c,0);
1036
1037 spin_unlock_irqrestore(c->lock, flags);
1038
1039 return 0;
1040}
1041
1042EXPORT_SYMBOL(z8530_sync_dma_close);
1043
1044/**
1045 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1046 * @dev: The network device to attach
1047 * @c: The Z8530 channel to configure in sync DMA mode.
1048 *
1049 * Set up a Z85x30 device for synchronous DMA transmission. One
1050 * ISA DMA channel must be available for this to work. The receive
1051 * side is run in PIO mode, but then it has the bigger FIFO.
1052 */
1053
1054int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1055{
1056 unsigned long cflags, dflags;
1057
1058 printk("Opening sync interface for TX-DMA\n");
1059 c->sync = 1;
1060 c->mtu = dev->mtu+64;
1061 c->count = 0;
1062 c->skb = NULL;
1063 c->skb2 = NULL;
1064
1065 /*
1066 * Allocate the DMA flip buffers. Limit by page size.
1067 * Everyone runs 1500 mtu or less on wan links so this
1068 * should be fine.
1069 */
1070
1071 if(c->mtu > PAGE_SIZE/2)
1072 return -EMSGSIZE;
1073
1074 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1075 if(c->tx_dma_buf[0]==NULL)
1076 return -ENOBUFS;
1077
1078 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1079
1080
1081 spin_lock_irqsave(c->lock, cflags);
1082
1083 /*
1084 * Load the PIO receive ring
1085 */
1086
1087 z8530_rx_done(c);
1088 z8530_rx_done(c);
1089
1090 /*
1091 * Load the DMA interfaces up
1092 */
1093
1094 c->rxdma_on = 0;
1095 c->txdma_on = 0;
1096
1097 c->tx_dma_used=0;
1098 c->dma_num=0;
1099 c->dma_ready=1;
1100 c->dma_tx = 1;
1101
1102 /*
1103 * Enable DMA control mode
1104 */
1105
1106 /*
1107 * TX DMA via DIR/REQ
1108 */
1109 c->regs[R14]|= DTRREQ;
1110 write_zsreg(c, R14, c->regs[R14]);
1111
1112 c->regs[R1]&= ~TxINT_ENAB;
1113 write_zsreg(c, R1, c->regs[R1]);
1114
1115 /*
1116 * Set up the DMA configuration
1117 */
1118
1119 dflags = claim_dma_lock();
1120
1121 disable_dma(c->txdma);
1122 clear_dma_ff(c->txdma);
1123 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1124 disable_dma(c->txdma);
1125
1126 release_dma_lock(dflags);
1127
1128 /*
1129 * Select the DMA interrupt handlers
1130 */
1131
1132 c->rxdma_on = 0;
1133 c->txdma_on = 1;
1134 c->tx_dma_used = 1;
1135
1136 c->irqs = &z8530_txdma_sync;
1137 z8530_rtsdtr(c,1);
1138 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1139 spin_unlock_irqrestore(c->lock, cflags);
1140
1141 return 0;
1142}
1143
1144EXPORT_SYMBOL(z8530_sync_txdma_open);
1145
1146/**
1147 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1148 * @dev: Network device to detach
1149 * @c: Z8530 channel to move into discard mode
1150 *
1151 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1152 * and free the buffers.
1153 */
1154
1155int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1156{
1157 unsigned long dflags, cflags;
1158 u8 chk;
1159
1160
1161 spin_lock_irqsave(c->lock, cflags);
1162
1163 c->irqs = &z8530_nop;
1164 c->max = 0;
1165 c->sync = 0;
1166
1167 /*
1168 * Disable the PC DMA channels
1169 */
1170
1171 dflags = claim_dma_lock();
1172
1173 disable_dma(c->txdma);
1174 clear_dma_ff(c->txdma);
1175 c->txdma_on = 0;
1176 c->tx_dma_used = 0;
1177
1178 release_dma_lock(dflags);
1179
1180 /*
1181 * Disable DMA control mode
1182 */
1183
1184 c->regs[R1]&= ~WT_RDY_ENAB;
1185 write_zsreg(c, R1, c->regs[R1]);
1186 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1187 c->regs[R1]|= INT_ALL_Rx;
1188 write_zsreg(c, R1, c->regs[R1]);
1189 c->regs[R14]&= ~DTRREQ;
1190 write_zsreg(c, R14, c->regs[R14]);
1191
1192 if(c->tx_dma_buf[0])
1193 {
1194 free_page((unsigned long)c->tx_dma_buf[0]);
1195 c->tx_dma_buf[0]=NULL;
1196 }
1197 chk=read_zsreg(c,R0);
1198 write_zsreg(c, R3, c->regs[R3]);
1199 z8530_rtsdtr(c,0);
1200
1201 spin_unlock_irqrestore(c->lock, cflags);
1202 return 0;
1203}
1204
1205
1206EXPORT_SYMBOL(z8530_sync_txdma_close);
1207
1208
1209/*
1210 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1211 * it exists...
1212 */
1213
1214static char *z8530_type_name[]={
1215 "Z8530",
1216 "Z85C30",
1217 "Z85230"
1218};
1219
1220/**
1221 * z8530_describe - Uniformly describe a Z8530 port
1222 * @dev: Z8530 device to describe
1223 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1224 * @io: the port value in question
1225 *
1226 * Describe a Z8530 in a standard format. We must pass the I/O as
1227 * the port offset isn't predictable. The main reason for this function
1228 * is to try and get a common format of report.
1229 */
1230
1231void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1232{
1233 printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
1234 dev->name,
1235 z8530_type_name[dev->type],
1236 mapping,
1237 Z8530_PORT_OF(io),
1238 dev->irq);
1239}
1240
1241EXPORT_SYMBOL(z8530_describe);
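/*
 * Usage sketch: a caller passes the probed base address and mapping type,
 * producing a line along the lines of (illustrative values only)
 *
 *	"hostess0: Z85230 found at I/O 0x130, IRQ 5."
 *
 *	z8530_describe(dev, "I/O", iobase);	// iobase is hypothetical
 */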
1242
1243/*
1244 * Locked operation part of the z8530 init code
1245 */
1246
1247static inline int do_z8530_init(struct z8530_dev *dev)
1248{
1249 /* NOP the interrupt handlers first - we might get a
1250 floating IRQ transition when we reset the chip */
1251 dev->chanA.irqs=&z8530_nop;
1252 dev->chanB.irqs=&z8530_nop;
1253 dev->chanA.dcdcheck=DCD;
1254 dev->chanB.dcdcheck=DCD;
1255
1256 /* Reset the chip */
1257 write_zsreg(&dev->chanA, R9, 0xC0);
1258 udelay(200);
1259 /* Now check it's valid */
1260 write_zsreg(&dev->chanA, R12, 0xAA);
1261 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1262 return -ENODEV;
1263 write_zsreg(&dev->chanA, R12, 0x55);
1264 if(read_zsreg(&dev->chanA, R12)!=0x55)
1265 return -ENODEV;
1266
1267 dev->type=Z8530;
1268
1269 /*
1270 * See the application note.
1271 */
1272
1273 write_zsreg(&dev->chanA, R15, 0x01);
1274
1275 /*
1276 * If we can set the low bit of R15 then
1277 * the chip is enhanced.
1278 */
1279
1280 if(read_zsreg(&dev->chanA, R15)==0x01)
1281 {
1282 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1283 /* Put a char in the fifo */
1284 write_zsreg(&dev->chanA, R8, 0);
1285 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1286 dev->type = Z85230; /* Has a FIFO */
1287 else
1288 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1289 }
1290
1291 /*
1292 * The code assumes R7' and friends are
1293 * off. Use write_zsext() for these and keep
1294 * this bit clear.
1295 */
1296
1297 write_zsreg(&dev->chanA, R15, 0);
1298
1299 /*
1300 * At this point it looks like the chip is behaving
1301 */
1302
1303 memcpy(dev->chanA.regs, reg_init, 16);
1304 memcpy(dev->chanB.regs, reg_init ,16);
1305
1306 return 0;
1307}
1308
1309/**
1310 * z8530_init - Initialise a Z8530 device
1311 * @dev: Z8530 device to initialise.
1312 *
1313 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1314 * is present, identify the type and then program it to hopefully
1315 * keep quite and behave. This matters a lot, a Z8530 in the wrong
1316 * state will sometimes get into stupid modes generating 10Khz
1317 * interrupt streams and the like.
1318 *
1319 * We set the interrupt handler up to discard any events, in case
1320 * we get them during reset or setup.
1321 *
1322 * Return 0 for success, or a negative value indicating the problem
1323 * in errno form.
1324 */
1325
1326int z8530_init(struct z8530_dev *dev)
1327{
1328 unsigned long flags;
1329 int ret;
1330
1331 /* Set up the chip level lock */
1332 spin_lock_init(&dev->lock);
1333 dev->chanA.lock = &dev->lock;
1334 dev->chanB.lock = &dev->lock;
1335
1336 spin_lock_irqsave(&dev->lock, flags);
1337 ret = do_z8530_init(dev);
1338 spin_unlock_irqrestore(&dev->lock, flags);
1339
1340 return ret;
1341}
1342
1343
1344EXPORT_SYMBOL(z8530_init);
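/*
 * Bring-up sketch (hypothetical names; order follows this driver's API,
 * not any particular board): the hardware driver fills in ctrlio/dataio
 * and the shared IRQ for both channels, then
 *
 *	if (z8530_init(mydev))		// reset and identify the chip
 *		goto fail;
 *	z8530_describe(mydev, "I/O", iobase);
 *
 * and typically, from the network device's open method,
 *
 *	z8530_sync_open(netdev, &mydev->chanA);
 *	z8530_channel_load(&mydev->chanA, z8530_hdlc_kilostream);
 *
 * DMA-capable boards use z8530_sync_dma_open()/z8530_sync_txdma_open()
 * in place of z8530_sync_open().
 */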
1345
1346/**
1347 * z8530_shutdown - Shutdown a Z8530 device
1348 * @dev: The Z8530 chip to shutdown
1349 *
1350 * We set the interrupt handlers to silence any interrupts. We then
1351 * reset the chip and wait 100uS to be sure the reset completed. Just
1352 * in case the caller then tries to do stuff.
1353 *
1354 * This is called without the lock held
1355 */
1356
1357int z8530_shutdown(struct z8530_dev *dev)
1358{
1359 unsigned long flags;
1360 /* Reset the chip */
1361
1362 spin_lock_irqsave(&dev->lock, flags);
1363 dev->chanA.irqs=&z8530_nop;
1364 dev->chanB.irqs=&z8530_nop;
1365 write_zsreg(&dev->chanA, R9, 0xC0);
1366 /* We must lock the udelay, the chip is offlimits here */
1367 udelay(100);
1368 spin_unlock_irqrestore(&dev->lock, flags);
1369 return 0;
1370}
1371
1372EXPORT_SYMBOL(z8530_shutdown);
1373
1374/**
1375 * z8530_channel_load - Load channel data
1376 * @c: Z8530 channel to configure
1377 * @rtable: table of register, value pairs
1378 * FIXME: ioctl to allow user uploaded tables
1379 *
1380 * Load a Z8530 channel up from the system data. We use +16 to
1381 * indicate the "prime" registers. The value 255 terminates the
1382 * table.
1383 */
1384
1385int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1386{
1387 unsigned long flags;
1388
1389 spin_lock_irqsave(c->lock, flags);
1390
1391 while(*rtable!=255)
1392 {
1393 int reg=*rtable++;
1394 if(reg>0x0F)
1395 write_zsreg(c, R15, c->regs[15]|1);
1396 write_zsreg(c, reg&0x0F, *rtable);
1397 if(reg>0x0F)
1398 write_zsreg(c, R15, c->regs[15]&~1);
1399 c->regs[reg]=*rtable++;
1400 }
1401 c->rx_function=z8530_null_rx;
1402 c->skb=NULL;
1403 c->tx_skb=NULL;
1404 c->tx_next_skb=NULL;
1405 c->mtu=1500;
1406 c->max=0;
1407 c->count=0;
1408 c->status=read_zsreg(c, R0);
1409 c->sync=1;
1410 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1411
1412 spin_unlock_irqrestore(c->lock, flags);
1413 return 0;
1414}
1415
1416EXPORT_SYMBOL(z8530_channel_load);
1417
1418
1419/**
1420 * z8530_tx_begin - Begin packet transmission
1421 * @c: The Z8530 channel to kick
1422 *
1423 * This is the speed sensitive side of transmission. If we are called
1424 * and no buffer is being transmitted we commence the next buffer. If
1425 * nothing is queued we idle the sync.
1426 *
1427 * Note: We are handling this code path in the interrupt path, keep it
1428 * fast or bad things will happen.
1429 *
1430 * Called with the lock held.
1431 */
1432
1433static void z8530_tx_begin(struct z8530_channel *c)
1434{
1435 unsigned long flags;
1436 if(c->tx_skb)
1437 return;
1438
1439 c->tx_skb=c->tx_next_skb;
1440 c->tx_next_skb=NULL;
1441 c->tx_ptr=c->tx_next_ptr;
1442
1443 if(c->tx_skb==NULL)
1444 {
1445 /* Idle on */
1446 if(c->dma_tx)
1447 {
1448 flags=claim_dma_lock();
1449 disable_dma(c->txdma);
1450 /*
1451 * Check if we crapped out.
1452 */
1453 if (get_dma_residue(c->txdma))
1454 {
1455 c->netdevice->stats.tx_dropped++;
1456 c->netdevice->stats.tx_fifo_errors++;
1457 }
1458 release_dma_lock(flags);
1459 }
1460 c->txcount=0;
1461 }
1462 else
1463 {
1464 c->txcount=c->tx_skb->len;
1465
1466
1467 if(c->dma_tx)
1468 {
1469 /*
1470 * FIXME. DMA is broken for the original 8530,
1471 * on the older parts we need to set a flag and
1472 * wait for a further TX interrupt to fire this
1473 * stage off
1474 */
1475
1476 flags=claim_dma_lock();
1477 disable_dma(c->txdma);
1478
1479 /*
1480 * These two are needed by the 8530/85C30
1481 * and must be issued when idling.
1482 */
1483
1484 if(c->dev->type!=Z85230)
1485 {
1486 write_zsctrl(c, RES_Tx_CRC);
1487 write_zsctrl(c, RES_EOM_L);
1488 }
1489 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1490 clear_dma_ff(c->txdma);
1491 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1492 set_dma_count(c->txdma, c->txcount);
1493 enable_dma(c->txdma);
1494 release_dma_lock(flags);
1495 write_zsctrl(c, RES_EOM_L);
1496 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1497 }
1498 else
1499 {
1500
1501 /* ABUNDER off */
1502 write_zsreg(c, R10, c->regs[10]);
1503 write_zsctrl(c, RES_Tx_CRC);
1504
1505 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1506 {
1507 write_zsreg(c, R8, *c->tx_ptr++);
1508 c->txcount--;
1509 }
1510
1511 }
1512 }
1513 /*
1514 * Since we emptied tx_skb we can ask for more
1515 */
1516 netif_wake_queue(c->netdevice);
1517}
1518
1519/**
1520 * z8530_tx_done - TX complete callback
1521 * @c: The channel that completed a transmit.
1522 *
1523 * This is called when we complete a packet send. We wake the queue,
1524 * start the next packet going and then free the buffer of the existing
1525 * packet. This code is fairly timing sensitive.
1526 *
1527 * Called with the register lock held.
1528 */
1529
1530static void z8530_tx_done(struct z8530_channel *c)
1531{
1532 struct sk_buff *skb;
1533
1534 /* Actually this can happen.*/
1535 if (c->tx_skb == NULL)
1536 return;
1537
1538 skb = c->tx_skb;
1539 c->tx_skb = NULL;
1540 z8530_tx_begin(c);
1541 c->netdevice->stats.tx_packets++;
1542 c->netdevice->stats.tx_bytes += skb->len;
1543 dev_kfree_skb_irq(skb);
1544}
1545
1546/**
1547 * z8530_null_rx - Discard a packet
1548 * @c: The channel the packet arrived on
1549 * @skb: The buffer
1550 *
1551 * We point the receive handler at this function when idle. Instead
1552 * of processing the frames we get to throw them away.
1553 */
1554
1555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1556{
1557 dev_kfree_skb_any(skb);
1558}
1559
1560EXPORT_SYMBOL(z8530_null_rx);
1561
1562/**
1563 * z8530_rx_done - Receive completion callback
1564 * @c: The channel that completed a receive
1565 *
1566 * A new packet is complete. Our goal here is to get back into receive
1567 * mode as fast as possible. On the Z85230 we could change to using
1568 * ESCC mode, but on the older chips we have no choice. We flip to the
1569 * new buffer immediately in DMA mode so that the DMA of the next
1570 * frame can occur while we are copying the previous buffer to an sk_buff
1571 *
1572 * Called with the lock held
1573 */
1574
1575static void z8530_rx_done(struct z8530_channel *c)
1576{
1577 struct sk_buff *skb;
1578 int ct;
1579
1580 /*
1581 * Is our receive engine in DMA mode
1582 */
1583
1584 if(c->rxdma_on)
1585 {
1586 /*
1587 * Save the ready state and the buffer currently
1588 * being used as the DMA target
1589 */
1590
1591 int ready=c->dma_ready;
1592 unsigned char *rxb=c->rx_buf[c->dma_num];
1593 unsigned long flags;
1594
1595 /*
1596 * Complete this DMA. Necessary to find the length
1597 */
1598
1599 flags=claim_dma_lock();
1600
1601 disable_dma(c->rxdma);
1602 clear_dma_ff(c->rxdma);
1603 c->rxdma_on=0;
1604 ct=c->mtu-get_dma_residue(c->rxdma);
1605 if(ct<0)
1606 ct=2; /* Shit happens.. */
1607 c->dma_ready=0;
1608
1609 /*
1610 * Normal case: the other slot is free, start the next DMA
1611 * into it immediately.
1612 */
1613
1614 if(ready)
1615 {
1616 c->dma_num^=1;
1617 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1618 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1619 set_dma_count(c->rxdma, c->mtu);
1620 c->rxdma_on = 1;
1621 enable_dma(c->rxdma);
1622 /* Stop any frames that we missed the head of
1623 from passing */
1624 write_zsreg(c, R0, RES_Rx_CRC);
1625 }
1626 else
1627 /* Can't occur as we don't re-enable the DMA irq until
1628 after the flip is done */
1629 printk(KERN_WARNING "%s: DMA flip overrun!\n",
1630 c->netdevice->name);
1631
1632 release_dma_lock(flags);
1633
1634 /*
1635 * Shove the old buffer into an sk_buff. We can't DMA
1636 * directly into one on a PC - it might be above the 16Mb
1637 * boundary. Optimisation - we could check to see if we
1638 * can avoid the copy. Optimisation 2 - make the memcpy
1639 * a copychecksum.
1640 */
1641
1642 skb = dev_alloc_skb(ct);
1643 if (skb == NULL) {
1644 c->netdevice->stats.rx_dropped++;
1645 printk(KERN_WARNING "%s: Memory squeeze.\n",
1646 c->netdevice->name);
1647 } else {
1648 skb_put(skb, ct);
1649 skb_copy_to_linear_data(skb, rxb, ct);
1650 c->netdevice->stats.rx_packets++;
1651 c->netdevice->stats.rx_bytes += ct;
1652 }
1653 c->dma_ready = 1;
1654 } else {
1655 RT_LOCK;
1656 skb = c->skb;
1657
1658 /*
1659 * The game we play for non DMA is similar. We want to
1660 * get the controller set up for the next packet as fast
1661 * as possible. We potentially only have one byte + the
1662 * fifo length for this. Thus we want to flip to the new
1663 * buffer and then mess around copying and allocating
1664 * things. For the current case it doesn't matter but
1665 * if you build a system where the sync irq isn't blocked
1666 * by the kernel IRQ disable then you need only block the
1667 * sync IRQ for the RT_LOCK area.
1668 *
1669 */
1670 ct=c->count;
1671
1672 c->skb = c->skb2;
1673 c->count = 0;
1674 c->max = c->mtu;
1675 if (c->skb) {
1676 c->dptr = c->skb->data;
1677 c->max = c->mtu;
1678 } else {
1679 c->count = 0;
1680 c->max = 0;
1681 }
1682 RT_UNLOCK;
1683
1684 c->skb2 = dev_alloc_skb(c->mtu);
1685 if (c->skb2 == NULL)
1686 printk(KERN_WARNING "%s: memory squeeze.\n",
1687 c->netdevice->name);
1688 else
1689 skb_put(c->skb2, c->mtu);
1690 c->netdevice->stats.rx_packets++;
1691 c->netdevice->stats.rx_bytes += ct;
1692 }
1693 /*
1694 * If we received a frame we must now process it.
1695 */
1696 if (skb) {
1697 skb_trim(skb, ct);
1698 c->rx_function(c, skb);
1699 } else {
1700 c->netdevice->stats.rx_dropped++;
1701 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1702 }
1703}
1704
1705/**
1706 * spans_boundary - Check a packet can be ISA DMA'd
1707 * @skb: The buffer to check
1708 *
1709 * Returns true if the buffer crosses a DMA boundary on a PC. The poor
1710 * thing can only DMA within a 64K block not across the edges of it.
1711 */
1712
1713static inline int spans_boundary(struct sk_buff *skb)
1714{
1715 unsigned long a=(unsigned long)skb->data;
1716 a^=(a+skb->len);
1717 if(a&0x00010000) /* If the 64K bit is different.. */
1718 return 1;
1719 return 0;
1720}
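/*
 * Worked example of the check above (illustrative addresses): a buffer
 * starting at 0x1FFF0 with len 0x20 ends at 0x20010;
 * 0x1FFF0 ^ 0x20010 = 0x3FFE0, which has bit 16 (0x10000) set, so the
 * frame straddles a 64K page and must be bounced through the flip buffer.
 */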
1721
1722/**
1723 * z8530_queue_xmit - Queue a packet
1724 * @c: The channel to use
1725 * @skb: The packet to kick down the channel
1726 *
1727 * Queue a packet for transmission. Because we have rather
1728 * hard to hit interrupt latencies for the Z85230 per packet
1729 * even in DMA mode we do the flip to DMA buffer if needed here
1730 * not in the IRQ.
1731 *
1732 * Called from the network code. The lock is not held at this
1733 * point.
1734 */
1735
1736int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1737{
1738 unsigned long flags;
1739
1740 netif_stop_queue(c->netdevice);
1741 if(c->tx_next_skb)
1742 {
1743 return 1;
1744 }
1745
1746 /* PC SPECIFIC - DMA limits */
1747
1748 /*
1749 * If we will DMA the transmit and it's gone over the ISA bus
1750 * limit, then copy to the flip buffer
1751 */
1752
1753 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1754 {
1755 /*
1756 * Send the flip buffer, and flip the flippy bit.
1757 * We don't care which is used when just so long as
1758 * we never use the same buffer twice in a row. Since
1759 * only one buffer can be going out at a time the other
1760 * has to be safe.
1761 */
1762 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1763 c->tx_dma_used^=1; /* Flip temp buffer */
1764 skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1765 }
1766 else
1767 c->tx_next_ptr=skb->data;
1768 RT_LOCK;
1769 c->tx_next_skb=skb;
1770 RT_UNLOCK;
1771
1772 spin_lock_irqsave(c->lock, flags);
1773 z8530_tx_begin(c);
1774 spin_unlock_irqrestore(c->lock, flags);
1775
1776 return 0;
1777}
1778
1779EXPORT_SYMBOL(z8530_queue_xmit);
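/*
 * Caller sketch: the board driver's transmit hook usually just forwards
 * the skb here (the private-data layout below is hypothetical).
 *
 *	static int mycard_queue_xmit(struct sk_buff *skb, struct net_device *d)
 *	{
 *		struct mycard *card = dev_to_hdlc(d)->priv;	// hypothetical priv
 *		return z8530_queue_xmit(&card->chanA, skb);
 *	}
 *
 * A return of 1 means tx_next_skb was still occupied; the queue has
 * already been stopped by z8530_queue_xmit() in that case.
 */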
1780
1781/*
1782 * Module support
1783 */
1784static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1785
1786static int __init z85230_init_driver(void)
1787{
1788 printk(banner);
1789 return 0;
1790}
1791module_init(z85230_init_driver);
1792
1793static void __exit z85230_cleanup_driver(void)
1794{
1795}
1796module_exit(z85230_cleanup_driver);
1797
1798MODULE_AUTHOR("Red Hat Inc.");
1799MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1800MODULE_LICENSE("GPL");