/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 * (c) Copyright 2000, 2001 Red Hat Inc
 *
 * Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 * ChangeLog:
 *
 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 * unification of all the Z85x30 asynchronous drivers for real.
 *
 * DMA now uses get_free_page as kmalloc buffers may span a 64K
 * boundary.
 *
 * Modified for SMP safety and SMP locking by Alan Cox
 *			<alan@lxorguk.ukuu.org.uk>
 *
 * Performance
 *
 * Z85230:
 * Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 * X.25 is not unrealistic on all machines. DMA mode can in theory
 * handle T1/E1 quite nicely. In practice the limit seems to be about
 * 512Kbit->1Mbit depending on the motherboard.
 *
 * Z85C30:
 * 64K will take DMA, 9600 baud X.25 should be OK.
 *
 * Z8530:
 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include "z85230.h"

/**
 * z8530_read_port - Architecture specific interface function
 * @p: port to read
 *
 * Provided port access methods. The Comtrol SV11 requires no delays
 * between accesses and uses PC I/O. Some drivers may need a 5uS delay.
 *
 * In the longer term this should become an architecture specific
 * section so that this can become a generic driver interface for all
 * platforms. For now we only handle PC I/O ports with or without the
 * dread 5uS sanity delay.
 *
 * The caller must hold sufficient locks to avoid violating the horrible
 * 5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently! */
		udelay(5);
	return r;
}

/**
 * z8530_write_port - Architecture specific interface function
 * @p: port to write
 * @d: value to write
 *
 * Write a value to a port with delays if need be. Note that the
 * caller must hold locks to avoid reads/writes from other contexts
 * violating the 5uS rule.
 *
 * In the longer term this should become an architecture specific
 * section so that this can become a generic driver interface for all
 * platforms. For now we only handle PC I/O ports with or without the
 * dread 5uS sanity delay.
 */


static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}



static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);

/**
 * read_zsreg - Read a register from a Z85230
 * @c: Z8530 channel to read from (2 per chip)
 * @reg: Register to read
 * FIXME: Use a spinlock.
 *
 * Most of the Z8530 registers are indexed off the control registers.
 * A read is done by writing to the control register and reading the
 * register back. The caller must hold the lock.
 */

static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 * read_zsdata - Read the data port of a Z8530 channel
 * @c: The Z8530 channel to read the data port from
 *
 * The data port provides fast access to some things. We still
 * have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 * write_zsreg - Write to a Z8530 channel register
 * @c: The Z8530 channel
 * @reg: Register number
 * @val: Value to write
 *
 * Write a value to an indexed register. The caller must hold the lock
 * to honour the irritating delay rules. We know about register 0
 * being fast to access.
 *
 * Assumes c->lock is held.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}

/**
 * write_zsctrl - Write to a Z8530 control register
 * @c: The Z8530 channel
 * @val: Value to write
 *
 * Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 * write_zsdata - Write to a Z8530 data register
 * @c: The Z8530 channel
 * @val: Value to write
 *
 * Write directly to the data register on the Z8530
 */


static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 * Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 * Register loading parameters for currently supported circuit types
 */


/*
 * Data clocked by telco end. This is the correct data for the UK
 * "kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE ?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 * As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM */

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

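/*
 * Illustrative note (added, not from the original sources): the final
 * "23, 3" pair in the table above relies on the register numbering
 * convention used by z8530_channel_load() later in this file - an index
 * greater than 15 means a "prime" register (index - 16), so 23 selects
 * WR7' and writes the value 3 to enable the extended auto TX/EOM
 * features of the 85230.
 */
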
/**
 * z8530_flush_fifo - Flush on chip RX FIFO
 * @c: Channel to flush
 *
 * Flush the receive FIFO. There is no specific option for this, we
 * blindly read bytes and discard them. Reading when there is no data
 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 * All locking is handled for the caller. On return data may still be
 * present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 * z8530_rtsdtr - Control the outgoing DTR/RTS lines
 * @c: The Z8530 channel to control
 * @set: 1 to set, 0 to clear
 *
 * Sets or clears DTR/RTS on the requested line. All locking is handled
 * by the caller. For now we assume all boards use the actual RTS/DTR
 * on the chip. Apparently one or two don't. We'll scream about them
 * later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 * z8530_rx - Handle a PIO receive event
 * @c: Z8530 channel to process
 *
 * Receive handler for receiving in PIO mode. This is much like the
 * async one but not quite the same or as complex.
 *
 * Note: It's intended that this handler can easily be separated from
 * the main code to run realtime. That'll be needed for some machines
 * (eg to ever clock 64kbits on a sparc ;)).
 *
 * The RT_LOCK macros don't do anything now. Keep the code covered
 * by them as short as possible in all circumstances - clocks cost
 * baud. The interrupt handler is assumed to be atomic w.r.t. other
 * code - this is true in the RT case too.
 *
 * We only cover the sync cases for this. If you want 2Mbit async
 * do it yourself but consider medical assistance first. This non DMA
 * synchronous mode is portable code. The DMA mode assumes PCI like
 * ISA DMA.
 *
 * Called with the device lock held.
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 * Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{

			/*
			 * Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 * Drop the lock for RX processing, or
				 * there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 * Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}


/**
 * z8530_tx - Handle a PIO transmit event
 * @c: Z8530 channel to process
 *
 * Z8530 transmit interrupt handler for the PIO mode. The basic
 * idea is to attempt to keep the FIFO fed. We fill in as many bytes
 * as possible; it's quite possible that we won't keep up with the
 * data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 * Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}


	/*
	 * End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_status - Handle a PIO status exception
 * @chan: Z8530 channel to process
 *
 * A status event occurred in PIO synchronous mode. There are several
 * reasons the chip will bother us here. A transmit underrun means we
 * failed to feed the chip fast enough and just broke a packet. A DCD
 * change is a line up or down.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (status & TxEOM) {
/*		printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}

	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_sync =
{
	z8530_rx,
	z8530_tx,
	z8530_status
};

EXPORT_SYMBOL(z8530_sync);

/**
 * z8530_dma_rx - Handle a DMA RX event
 * @chan: Channel to handle
 *
 * Non bus mastering DMA interfaces for the Z8x30 devices. This
 * is really pretty PC specific. The DMA mode means that most receive
 * events are handled by the DMA hardware. We get a kick here only if
 * a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}

/**
 * z8530_dma_tx - Handle a DMA TX event
 * @chan: The Z8530 channel to handle
 *
 * We have received an interrupt while doing DMA transmissions. It
 * shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		printk(KERN_WARNING "Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	printk(KERN_ERR "DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 * z8530_dma_status - Handle a DMA status exception
 * @chan: Z8530 channel to process
 *
 * A status event occurred on the Z8530. We receive these for two reasons
 * when in DMA mode. Firstly if we finished a packet transfer we get one
 * and kick the next packet out. Secondly we may see a DCD change.
 */

static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;


	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_dma_sync=
{
	z8530_dma_rx,
	z8530_dma_tx,
	z8530_dma_status
};

EXPORT_SYMBOL(z8530_dma_sync);

struct z8530_irqhandler z8530_txdma_sync=
{
	z8530_rx,
	z8530_dma_tx,
	z8530_dma_status
};

EXPORT_SYMBOL(z8530_txdma_sync);

/**
 * z8530_rx_clear - Handle RX events from a stopped chip
 * @c: Z8530 channel to shut up
 *
 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */


static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 * Data and status bytes
	 */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 * Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_tx_clear - Handle TX events from a stopped chip
 * @c: Z8530 channel to shut up
 *
 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_status_clear - Handle status events from a stopped chip
 * @chan: Z8530 channel to shut up
 *
 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_nop=
{
	z8530_rx_clear,
	z8530_tx_clear,
	z8530_status_clear
};


EXPORT_SYMBOL(z8530_nop);

/**
 * z8530_interrupt - Handle an interrupt from a Z8530
 * @irq: Interrupt number
 * @dev_id: The Z8530 device that is interrupting.
 *
 * A Z85[2]30 device has stuck its hand in the air for attention.
 * We scan both the channels on the chip for events and then call
 * the channel specific callbacks for each channel that has events.
 * We have to use callback functions because the two channels can be
 * in different modes.
 *
 * Locking is done for the handlers. Note that locking is done
 * at the chip level (the 5uS delay issue is per chip not per
 * channel). c->lock for both channels points to dev->lock.
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		printk(KERN_ERR "IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan
		   A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);

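/*
 * Usage sketch (illustrative only, not part of the original driver):
 * a board driver hands this handler straight to request_irq() with its
 * struct z8530_dev as the cookie. The names below are hypothetical.
 *
 *	static struct z8530_dev board;
 *
 *	if (request_irq(board.irq, z8530_interrupt, IRQF_SHARED,
 *			"z85230", &board))
 *		goto fail;
 */
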
static char reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};


/**
 * z8530_sync_open - Open a Z8530 channel for PIO
 * @dev: The network interface we are using
 * @c: The Z8530 channel to open in synchronous PIO mode
 *
 * Switch a Z8530 into synchronous mode without DMA assist. We
 * raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/**
 * z8530_sync_close - Close a PIO Z8530 channel
 * @dev: Network device to close
 * @c: Z8530 channel to disassociate and move to idle
 *
 * Close down a Z8530 interface and switch its interrupt handlers
 * to discard future events.
 */

int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/**
 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
 * @dev: The network device to attach
 * @c: The Z8530 channel to configure in sync DMA mode.
 *
 * Set up a Z85x30 device for synchronous DMA in both directions. Two
 * ISA DMA channels must be available for this to work. We assume ISA
 * DMA driven I/O and PC limits on access.
 */

int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 * Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 * Allocate the DMA flip buffers. Limit by page size.
	 * Everyone runs 1500 mtu or less on wan links so this
	 * should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 * Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 * TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 * RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 * DMA interrupts
	 */

	/*
	 * Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 * Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/**
 * z8530_sync_dma_close - Close down DMA I/O
 * @dev: Network device to detach
 * @c: Z8530 channel to move into discard mode
 *
 * Shut down a DMA mode synchronous interface. Halt the DMA, and
 * free the buffers.
 */

int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 * Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 * Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
 * @dev: The network device to attach
 * @c: The Z8530 channel to configure in sync DMA mode.
 *
 * Set up a Z85x30 device for synchronous DMA transmission. One
 * ISA DMA channel must be available for this to work. The receive
 * side is run in PIO mode, but then it has the bigger FIFO.
 */

int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	printk("Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 * Allocate the DMA flip buffers. Limit by page size.
	 * Everyone runs 1500 mtu or less on wan links so this
	 * should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 * Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 * Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

	/*
	 * Enable DMA control mode
	 */

	/*
	 * TX DMA via DIR/REQ
	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 * Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 * Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
 * z8530_sync_txdma_close - Close down a TX driven DMA channel
 * @dev: Network device to detach
 * @c: Z8530 channel to move into discard mode
 *
 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
 * and free the buffers.
 */

int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;


	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 * Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 * Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_txdma_close);


/*
 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 * it exists...
 */

static char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};

/**
 * z8530_describe - Uniformly describe a Z8530 port
 * @dev: Z8530 device to describe
 * @mapping: string holding mapping type (eg "I/O" or "Mem")
 * @io: the port value in question
 *
 * Describe a Z8530 in a standard format. We must pass the I/O as
 * the port offset isn't predictable. The main reason for this function
 * is to try and get a common format of report.
 */

void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);

/*
 * Locked operation part of the z8530 init code
 */

static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	   floating IRQ transition when we reset the chip */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	dev->chanA.dcdcheck=DCD;
	dev->chanB.dcdcheck=DCD;

	/* Reset the chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check it's valid */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if(read_zsreg(&dev->chanA, R12)!=0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if(read_zsreg(&dev->chanA, R12)!=0x55)
		return -ENODEV;

	dev->type=Z8530;

	/*
	 * See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 * If we can set the low bit of R15 then
	 * the chip is enhanced.
	 */

	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
			dev->type = Z85230;	/* Has a FIFO */
		else
			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
	}

	/*
	 * The code assumes R7' and friends are
	 * off. Use write_zsext() for these and keep
	 * this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/*
	 * At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init, 16);

	return 0;
}

/**
 * z8530_init - Initialise a Z8530 device
 * @dev: Z8530 device to initialise.
 *
 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 * is present, identify the type and then program it to hopefully
 * keep quiet and behave. This matters a lot; a Z8530 in the wrong
 * state will sometimes get into stupid modes generating 10kHz
 * interrupt streams and the like.
 *
 * We set the interrupt handler up to discard any events, in case
 * we get them during reset or setup.
 *
 * Return 0 for success, or a negative value indicating the problem
 * in errno form.
 */

int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}


EXPORT_SYMBOL(z8530_init);

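/*
 * Typical bring-up order, as a hedged sketch rather than a prescription
 * (the exact sequence is up to the board driver; names are generic):
 *
 *	request_irq(irq, z8530_interrupt, ...)	install the handler
 *	z8530_init(&board)			detect and quiesce the chip
 *	z8530_channel_load(...)			program each channel
 *	z8530_sync_open()/z8530_sync_dma_open()	when the interface comes up
 */
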
/**
 * z8530_shutdown - Shutdown a Z8530 device
 * @dev: The Z8530 chip to shutdown
 *
 * We set the interrupt handlers to silence any interrupts. We then
 * reset the chip and wait 100uS to be sure the reset completed. Just
 * in case the caller then tries to do stuff.
 *
 * This is called without the lock held
 */

int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 * z8530_channel_load - Load channel data
 * @c: Z8530 channel to configure
 * @rtable: table of register, value pairs
 * FIXME: ioctl to allow user uploaded tables
 *
 * Load a Z8530 channel up from the system data. We use +16 to
 * indicate the "prime" registers. The value 255 terminates the
 * table.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		c->regs[reg]=*rtable++;
	}
	c->rx_function=z8530_null_rx;
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);

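/*
 * Illustrative use (added, not from the original file): a card driver
 * would normally feed one of the tables exported above into each
 * channel before opening it, e.g. (names hypothetical):
 *
 *	z8530_channel_load(&board.chanA, z8530_hdlc_kilostream_85230);
 *	z8530_channel_load(&board.chanB, z8530_dead_port);
 */
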
/**
 * z8530_tx_begin - Begin packet transmission
 * @c: The Z8530 channel to kick
 *
 * This is the speed sensitive side of transmission. If we are called
 * and no buffer is being transmitted we commence the next buffer. If
 * nothing is queued we idle the sync.
 *
 * Note: We are handling this code path in the interrupt path, keep it
 * fast or bad things will happen.
 *
 * Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 * Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;


		if(c->dma_tx)
		{
			/*
			 * FIXME. DMA is broken for the original 8530,
			 * on the older parts we need to set a flag and
			 * wait for a further TX interrupt to fire this
			 * stage off
			 */

			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 * These two are needed by the 8530/85C30
			 * and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 * Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}

/**
 * z8530_tx_done - TX complete callback
 * @c: The channel that completed a transmit.
 *
 * This is called when we complete a packet send. We wake the queue,
 * start the next packet going and then free the buffer of the existing
 * packet. This code is fairly timing sensitive.
 *
 * Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen. */
	if (c->tx_skb == NULL)
		return;

	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	dev_kfree_skb_irq(skb);
}

/**
 * z8530_null_rx - Discard a packet
 * @c: The channel the packet arrived on
 * @skb: The buffer
 *
 * We point the receive handler at this function when idle. Instead
 * of processing the frames we get to throw them away.
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
 * z8530_rx_done - Receive completion callback
 * @c: The channel that completed a receive
 *
 * A new packet is complete. Our goal here is to get back into receive
 * mode as fast as possible. On the Z85230 we could change to using
 * ESCC mode, but on the older chips we have no choice. We flip to the
 * new buffer immediately in DMA mode so that the DMA of the next
 * frame can occur while we are copying the previous buffer to an sk_buff.
 *
 * Called with the lock held.
 */

static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 * Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 * Save the ready state and the buffer currently
		 * being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 * Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 * Normal case: the other slot is free, start the next DMA
		 * into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 * Shove the old buffer into an sk_buff. We can't DMA
		 * directly into one on a PC - it might be above the 16Mb
		 * boundary. Optimisation - we could check to see if we
		 * can avoid the copy. Optimisation 2 - make the memcpy
		 * a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 * The game we play for non DMA is similar. We want to
		 * get the controller set up for the next packet as fast
		 * as possible. We potentially only have one byte + the
		 * fifo length for this. Thus we want to flip to the new
		 * buffer and then mess around copying and allocating
		 * things. For the current case it doesn't matter but
		 * if you build a system where the sync irq isn't blocked
		 * by the kernel IRQ disable then you need only block the
		 * sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 * If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}

/**
 * spans_boundary - Check a packet can be ISA DMA'd
 * @skb: The buffer to check
 *
 * Returns true if the buffer crosses a DMA boundary on a PC. The poor
 * thing can only DMA within a 64K block, not across the edges of it.
 */

static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a=(unsigned long)skb->data;
	a^=(a+skb->len);
	if(a&0x00010000)	/* If the 64K bit is different.. */
		return 1;
	return 0;
}

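/*
 * Worked example (added for illustration): a buffer whose skb->data sits
 * at 0xFFF0 with len 0x20 ends at 0x10010; start XOR end has bit 16 set,
 * so the test above reports that it straddles a 64K boundary and
 * z8530_queue_xmit() below will bounce it through the flip buffer.
 */
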
/**
 * z8530_queue_xmit - Queue a packet
 * @c: The channel to use
 * @skb: The packet to kick down the channel
 *
 * Queue a packet for transmission. Because we have rather
 * hard-to-hit interrupt latencies for the Z85230 per packet,
 * even in DMA mode we do the flip to the DMA buffer, if needed,
 * here and not in the IRQ.
 *
 * Called from the network code. The lock is not held at this
 * point.
 */

int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
	{
		return 1;
	}

	/* PC SPECIFIC - DMA limits */

	/*
	 * If we will DMA the transmit and it's gone over the ISA bus
	 * limit, then copy to the flip buffer
	 */

	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 * Send the flip buffer, and flip the flippy bit.
		 * We don't care which is used when just so long as
		 * we never use the same buffer twice in a row. Since
		 * only one buffer can be going out at a time the other
		 * has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_queue_xmit);

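/*
 * Sketch of the caller side (illustrative only; names are hypothetical).
 * A board driver's transmit hook simply picks the channel for the
 * net_device and defers to z8530_queue_xmit():
 *
 *	static int card_queue_xmit(struct sk_buff *skb, struct net_device *d)
 *	{
 *		struct z8530_channel *chan = dev_to_chan(d);
 *		return z8530_queue_xmit(chan, skb);
 *	}
 */
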
/*
 * Module support
 */
static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");