/*
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
 *	boundary.
 *
 *	Modified for SMP safety and SMP locking by Alan Cox
 *					<alan@lxorguk.ukuu.org.uk>
 *
 *	Performance
 *
 *	Z85230:
 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include "z85230.h"


/**
 *	z8530_read_port - Architecture specific interface function
 *	@p: port to read
 *
 *	Provided port access methods. The Comtrol SV11 requires no delays
 *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 *
 *	The caller must hold sufficient locks to avoid violating the horrible
 *	5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
}

/**
 *	z8530_write_port - Architecture specific interface function
 *	@p: port to write
 *	@d: value to write
 *
 *	Write a value to a port with delays if need be. Note that the
 *	caller must hold locks to avoid read/writes from other contexts
 *	violating the 5uS rule
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 */


static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}



static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);


/**
 *	read_zsreg - Read a register from a Z85230
 *	@c: Z8530 channel to read from (2 per chip)
 *	@reg: Register to read
 *	FIXME: Use a spinlock.
 *
 *	Most of the Z8530 registers are indexed off the control registers.
 *	A read is done by writing to the control register and reading the
 *	register back. The caller must hold the lock
 */

static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 *	read_zsdata - Read the data port of a Z8530 channel
 *	@c: The Z8530 channel to read the data port from
 *
 *	The data port provides fast access to some things. We still
 *	have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 *	write_zsreg - Write to a Z8530 channel register
 *	@c: The Z8530 channel
 *	@reg: Register number
 *	@val: Value to write
 *
 *	Write a value to an indexed register. The caller must hold the lock
 *	to honour the irritating delay rules. We know about register 0
 *	being fast to access.
 *
 *	Assumes c->lock is held.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}

/**
 *	write_zsctrl - Write to a Z8530 control register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 *	write_zsdata - Write to a Z8530 data register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the data register on the Z8530
 */


static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
 */


/*
 *	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE ?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM */

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
 *	z8530_flush_fifo - Flush on chip RX FIFO
 *	@c: Channel to flush
 *
 *	Flush the receive FIFO. There is no specific option for this, we
 *	blindly read bytes and discard them. Reading when there is no data
 *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 *	All locking is handled for the caller. On return data may still be
 *	present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 *	z8530_rtsdtr - Control the outgoing DTR/RTS line
 *	@c: The Z8530 channel to control
 *	@set: 1 to set, 0 to clear
 *
 *	Sets or clears DTR/RTS on the requested line. All locking is handled
 *	by the caller. For now we assume all boards use the actual RTS/DTR
 *	on the chip. Apparently one or two don't. We'll scream about them
 *	later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 *	z8530_rx - Handle a PIO receive event
 *	@c: Z8530 channel to process
 *
 *	Receive handler for receiving in PIO mode. This is much like the
 *	async one but not quite the same or as complex
 *
 *	Note: It's intended that this handler can easily be separated from
 *	the main code to run realtime. That'll be needed for some machines
 *	(eg to ever clock 64kbits on a sparc ;)).
 *
 *	The RT_LOCK macros don't do anything now. Keep the code covered
 *	by them as short as possible in all circumstances - clocks cost
 *	baud. The interrupt handler is assumed to be atomic w.r.t.
 *	other code - this is true in the RT case too.
 *
 *	We only cover the sync cases for this. If you want 2Mbit async
 *	do it yourself but consider medical assistance first. This non DMA
 *	synchronous mode is portable code. The DMA mode assumes PCI like
 *	ISA DMA
 *
 *	Called with the device lock held
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{

			/*
			 *	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}


/**
 *	z8530_tx - Handle a PIO transmit event
 *	@c: Z8530 channel to process
 *
 *	Z8530 transmit interrupt handler for the PIO mode. The basic
 *	idea is to attempt to keep the FIFO fed. We fill as many bytes
 *	in as possible; it's quite possible that we won't keep up with the
 *	data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}


	/*
	 *	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status - Handle a PIO status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred in PIO synchronous mode. There are several
 *	reasons the chip will bother us here. A transmit underrun means we
 *	failed to feed the chip fast enough and just broke a packet. A DCD
 *	change is a line up or down.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (status & TxEOM) {
		/* printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}

	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_sync =
{
	z8530_rx,
	z8530_tx,
	z8530_status
};

EXPORT_SYMBOL(z8530_sync);

/**
 *	z8530_dma_rx - Handle a DMA RX event
 *	@chan: Channel to handle
 *
 *	Non bus mastering DMA interfaces for the Z8x30 devices. This
 *	is really pretty PC specific. The DMA mode means that most receive
 *	events are handled by the DMA hardware. We get a kick here only if
 *	a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}

/**
 *	z8530_dma_tx - Handle a DMA TX event
 *	@chan:	The Z8530 channel to handle
 *
 *	We have received an interrupt while doing DMA transmissions. It
 *	shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		printk(KERN_WARNING "Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	printk(KERN_ERR "DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 *	z8530_dma_status - Handle a DMA status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred on the Z8530. We receive these for two reasons
 *	when in DMA mode. Firstly if we finished a packet transfer we get one
 *	and kick the next packet out. Secondly we may see a DCD change.
 */

static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;


	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

static struct z8530_irqhandler z8530_dma_sync = {
	z8530_dma_rx,
	z8530_dma_tx,
	z8530_dma_status
};

static struct z8530_irqhandler z8530_txdma_sync = {
	z8530_rx,
	z8530_dma_tx,
	z8530_dma_status
};

/**
 *	z8530_rx_clear - Handle RX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */


static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 *	Data and status bytes
	 */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_tx_clear - Handle TX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status_clear - Handle status events from a stopped chip
 *	@chan: Z8530 channel to shut up
 *
 *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_nop=
{
	z8530_rx_clear,
	z8530_tx_clear,
	z8530_status_clear
};


EXPORT_SYMBOL(z8530_nop);

/**
 *	z8530_interrupt - Handle an interrupt from a Z8530
 *	@irq:	Interrupt number
 *	@dev_id: The Z8530 device that is interrupting.
 *
 *	A Z85[2]30 device has stuck its hand in the air for attention.
 *	We scan both the channels on the chip for events and then call
 *	the channel specific call backs for each channel that has events.
 *	We have to use callback functions because the two channels can be
 *	in different modes.
 *
 *	Locking is done for the handlers. Note that locking is done
 *	at the chip level (the 5uS delay issue is per chip not per
 *	channel). c->lock for both channels points to dev->lock
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 uninitialized_var(intr);
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		printk(KERN_ERR "IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan
		   A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);
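
/*
 *	Example (not part of this file): how a hypothetical card driver
 *	might hook this handler up at probe time. The IRQ number and the
 *	"hostess" name below are invented for illustration only.
 *
 *	static struct z8530_dev sv;
 *
 *	if (request_irq(irq, z8530_interrupt, 0, "hostess", &sv))
 *		return -EBUSY;	// the handler walks both channels of &sv
 *	sv.irq = irq;		// later reported by z8530_describe()
 */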

static char reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};


/**
 *	z8530_sync_open - Open a Z8530 channel for PIO
 *	@dev:	The network interface we are using
 *	@c:	The Z8530 channel to open in synchronous PIO mode
 *
 *	Switch a Z8530 into synchronous mode without DMA assist. We
 *	raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/**
 *	z8530_sync_close - Close a PIO Z8530 channel
 *	@dev: Network device to close
 *	@c: Z8530 channel to disassociate and move to idle
 *
 *	Close down a Z8530 interface and switch its interrupt handlers
 *	to discard future events.
 */

int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);
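
/*
 *	Example (illustrative sketch only, not from this driver): a possible
 *	net_device open/stop pair built on the PIO API above. "slvl_to_dev()"
 *	is an assumed helper that maps the net_device to its z8530_dev.
 *
 *	static int demo_open(struct net_device *d)
 *	{
 *		struct z8530_dev *sv = slvl_to_dev(d);	// hypothetical
 *		int err;
 *
 *		err = z8530_sync_open(d, &sv->chanA);
 *		if (err)
 *			return err;
 *		err = z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
 *		if (err) {
 *			z8530_sync_close(d, &sv->chanA);
 *			return err;
 *		}
 *		netif_start_queue(d);
 *		return 0;
 *	}
 *
 *	static int demo_stop(struct net_device *d)
 *	{
 *		struct z8530_dev *sv = slvl_to_dev(d);
 *
 *		netif_stop_queue(d);
 *		return z8530_sync_close(d, &sv->chanA);
 *	}
 */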

/**
 *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA in both directions. Two
 *	ISA DMA channels must be available for this to work. We assume ISA
 *	DMA driven I/O and PC limits on access.
 */

int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/**
 *	z8530_sync_dma_close - Close down DMA I/O
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA mode synchronous interface. Halt the DMA, and
 *	free the buffers.
 */

int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
 *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA transmission. One
 *	ISA DMA channel must be available for this to work. The receive
 *	side is run in PIO mode, but then it has the bigger FIFO.
 */

int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	printk("Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

	/*
	 *	Enable DMA control mode
	 */

	/*
	 *	TX DMA via DIR/REQ
	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
 *	z8530_sync_txdma_close - Close down a TX driven DMA channel
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
 *	and free the buffers.
 */

int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;


	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_txdma_close);
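
/*
 *	Example (sketch, assumptions flagged): picking between the three
 *	open variants above from a hypothetical "dma_mode" setting the probe
 *	code stored earlier. "ch" is a struct z8530_channel whose rxdma and
 *	txdma ISA channels were requested beforehand when DMA is used.
 *
 *	switch (dma_mode) {
 *	case 2:
 *		err = z8530_sync_dma_open(d, ch);	// RX and TX by ISA DMA
 *		break;
 *	case 1:
 *		err = z8530_sync_txdma_open(d, ch);	// TX DMA, PIO receive
 *		break;
 *	default:
 *		err = z8530_sync_open(d, ch);		// pure PIO
 *	}
 */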


/*
 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *	it exists...
 */

static char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};

/**
 *	z8530_describe - Uniformly describe a Z8530 port
 *	@dev: Z8530 device to describe
 *	@mapping: string holding mapping type (eg "I/O" or "Mem")
 *	@io: the port value in question
 *
 *	Describe a Z8530 in a standard format. We must pass the I/O as
 *	the port offset isn't predictable. The main reason for this function
 *	is to try and get a common format of report.
 */

void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);

/*
 *	Locked operation part of the z8530 init code
 */

static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	   floating IRQ transition when we reset the chip */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	dev->chanA.dcdcheck=DCD;
	dev->chanB.dcdcheck=DCD;

	/* Reset the chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check it's valid */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if(read_zsreg(&dev->chanA, R12)!=0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if(read_zsreg(&dev->chanA, R12)!=0x55)
		return -ENODEV;

	dev->type=Z8530;

	/*
	 *	See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 *	If we can set the low bit of R15 then
	 *	the chip is enhanced.
	 */

	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
			dev->type = Z85230;	/* Has a FIFO */
		else
			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
	}

	/*
	 *	The code assumes R7' and friends are
	 *	off. Use write_zsext() for these and keep
	 *	this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/*
	 *	At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init, 16);

	return 0;
}

/**
 *	z8530_init - Initialise a Z8530 device
 *	@dev: Z8530 device to initialise.
 *
 *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 *	is present, identify the type and then program it to hopefully
 *	keep quiet and behave. This matters a lot; a Z8530 in the wrong
 *	state will sometimes get into stupid modes generating 10kHz
 *	interrupt streams and the like.
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setup.
 *
 *	Return 0 for success, or a negative value indicating the problem
 *	in errno form.
 */

int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}


EXPORT_SYMBOL(z8530_init);
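
/*
 *	Example (illustrative only): the order a hypothetical ISA card probe
 *	might call the core in. The port layout, IRQ and "sealevel" name are
 *	invented for the example; real boards wire the channels differently.
 *
 *	sv->chanA.ctrlio = iobase + 1;
 *	sv->chanA.dataio = iobase + 3;
 *	sv->chanB.ctrlio = iobase;
 *	sv->chanB.dataio = iobase + 2;
 *	sv->chanA.dev = sv;
 *	sv->chanB.dev = sv;
 *
 *	if (request_irq(irq, z8530_interrupt, 0, "sealevel", sv))
 *		goto fail;
 *	disable_irq(irq);		// keep it quiet until programmed
 *	if (z8530_init(sv)) {		// identifies 8530/85C30/85230
 *		free_irq(irq, sv);
 *		goto fail;
 *	}
 *	z8530_channel_load(&sv->chanB, z8530_dead_port);
 *	enable_irq(irq);
 *	z8530_describe(sv, "I/O", iobase);
 */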

/**
 *	z8530_shutdown - Shutdown a Z8530 device
 *	@dev: The Z8530 chip to shutdown
 *
 *	We set the interrupt handlers to silence any interrupts. We then
 *	reset the chip and wait 100uS to be sure the reset completed. Just
 *	in case the caller then tries to do stuff.
 *
 *	This is called without the lock held
 */

int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 *	z8530_channel_load - Load channel data
 *	@c: Z8530 channel to configure
 *	@rtable: table of register, value pairs
 *	FIXME: ioctl to allow user uploaded tables
 *
 *	Load a Z8530 channel up from the system data. We use +16 to
 *	indicate the "prime" registers. The value 255 terminates the
 *	table.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		c->regs[reg]=*rtable++;
	}
	c->rx_function=z8530_null_rx;
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);
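
/*
 *	Example (sketch): the table consumed above is flat {register, value}
 *	byte pairs terminated by 255, with 16 added to a register number to
 *	reach the "prime" registers on enhanced parts. A hypothetical minimal
 *	table and its use might look like:
 *
 *	static u8 demo_sync_config[] = {
 *		4,	SYNC_ENAB | SDLC | X1CLK,	// WR4: SDLC framing
 *		9,	NV | MIE | NORESET,		// WR9: master IRQ enable
 *		255					// terminator
 *	};
 *
 *	z8530_channel_load(&sv->chanA, demo_sync_config);
 */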


/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
 *
 *	This is the speed sensitive side of transmission. If we are called
 *	and no buffer is being transmitted we commence the next buffer. If
 *	nothing is queued we idle the sync.
 *
 *	Note: We are handling this code path in the interrupt path, keep it
 *	fast or bad things will happen.
 *
 *	Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;


		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
			 */

			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}

/**
 *	z8530_tx_done - TX complete callback
 *	@c: The channel that completed a transmit.
 *
 *	This is called when we complete a packet send. We wake the queue,
 *	start the next packet going and then free the buffer of the existing
 *	packet. This code is fairly timing sensitive.
 *
 *	Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen. */
	if (c->tx_skb == NULL)
		return;

	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	dev_kfree_skb_irq(skb);
}

/**
 *	z8530_null_rx - Discard a packet
 *	@c: The channel the packet arrived on
 *	@skb: The buffer
 *
 *	We point the receive handler at this function when idle. Instead
 *	of processing the frames we get to throw them away.
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
 *	z8530_rx_done - Receive completion callback
 *	@c: The channel that completed a receive
 *
 *	A new packet is complete. Our goal here is to get back into receive
 *	mode as fast as possible. On the Z85230 we could change to using
 *	ESCC mode, but on the older chips we have no choice. We flip to the
 *	new buffer immediately in DMA mode so that the DMA of the next
 *	frame can occur while we are copying the previous buffer to an sk_buff
 *
 *	Called with the lock held
 */

static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}

/**
 *	spans_boundary - Check a packet can be ISA DMA'd
 *	@skb: The buffer to check
 *
 *	Returns true if the buffer crosses a DMA boundary on a PC. The poor
 *	thing can only DMA within a 64K block not across the edges of it.
 */

static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a=(unsigned long)skb->data;
	a^=(a+skb->len);
	if(a&0x00010000)	/* If the 64K bit is different.. */
		return 1;
	return 0;
}
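
/*
 *	Worked example of the check above (addresses invented): a 300 byte
 *	frame at bus address 0x0000FF80 ends at 0x000100AC, so bit 16 of the
 *	start and end differ, the XOR has 0x00010000 set and the frame must
 *	be bounced through the flip buffer. The same frame at 0x00010100
 *	stays inside one 64K block and can be DMA'd in place.
 */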

/**
 *	z8530_queue_xmit - Queue a packet
 *	@c: The channel to use
 *	@skb: The packet to kick down the channel
 *
 *	Queue a packet for transmission. Because we have rather
 *	hard to hit interrupt latencies for the Z85230 per packet
 *	even in DMA mode we do the flip to DMA buffer if needed here
 *	not in the IRQ.
 *
 *	Called from the network code. The lock is not held at this
 *	point.
 */

netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
		return NETDEV_TX_BUSY;


	/* PC SPECIFIC - DMA limits */

	/*
	 *	If we will DMA the transmit and it's gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */

	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
		 *	has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);
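
/*
 *	Example (sketch, not part of this driver): a hypothetical
 *	ndo_start_xmit method simply hands the frame to the core;
 *	"dev_to_chan()" is an assumed helper.
 *
 *	static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
 *					   struct net_device *d)
 *	{
 *		return z8530_queue_xmit(dev_to_chan(d), skb);
 *	}
 */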

/*
 *	Module support
 */
static const char banner[] __initdata =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");