1/*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.37 2005/09/07 13:13:19 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (it is too small to hold the
30 * largest frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
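/*
 * Usage sketch (illustrative only, not part of the driver): in
 * synchronous mode one write() carries exactly one complete HDLC
 * frame, as described in the header above.  The device node name
 * and the helper below are hypothetical.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	unsigned char frame[256];
 *	int len = build_frame(frame);		// hypothetical helper
 *	write(fd, frame, len);			// sends one HDLC frame
 *	close(fd);
 */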
55
56#if defined(__i386__)
57# define BREAKPOINT() asm(" int $3");
58#else
59# define BREAKPOINT() { }
60#endif
61
62#define MAX_ISA_DEVICES 10
63#define MAX_PCI_DEVICES 10
64#define MAX_TOTAL_DEVICES 20
65
66#include <linux/config.h>
67#include <linux/module.h>
68#include <linux/errno.h>
69#include <linux/signal.h>
70#include <linux/sched.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/pci.h>
74#include <linux/tty.h>
75#include <linux/tty_flip.h>
76#include <linux/serial.h>
77#include <linux/major.h>
78#include <linux/string.h>
79#include <linux/fcntl.h>
80#include <linux/ptrace.h>
81#include <linux/ioport.h>
82#include <linux/mm.h>
83#include <linux/slab.h>
84#include <linux/delay.h>
85
86#include <linux/netdevice.h>
87
88#include <linux/vmalloc.h>
89#include <linux/init.h>
90#include <asm/serial.h>
91
92#include <linux/delay.h>
93#include <linux/ioctl.h>
94
95#include <asm/system.h>
96#include <asm/io.h>
97#include <asm/irq.h>
98#include <asm/dma.h>
99#include <linux/bitops.h>
100#include <asm/types.h>
101#include <linux/termios.h>
102#include <linux/workqueue.h>
103#include <linux/hdlc.h>
104
105#ifdef CONFIG_HDLC_MODULE
106#define CONFIG_HDLC 1
107#endif
108
109#define GET_USER(error,value,addr) error = get_user(value,addr)
110#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
111#define PUT_USER(error,value,addr) error = put_user(value,addr)
112#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
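/*
 * These wrappers fold the user copy result into a single error code.
 * A minimal sketch of the intended calling pattern (variable names
 * are illustrative):
 *
 *	int err;
 *	MGSL_PARAMS tmp;
 *	COPY_FROM_USER(err, &tmp, user_params, sizeof(MGSL_PARAMS));
 *	if (err)
 *		return err;	// -EFAULT if the user buffer was bad
 */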
113
114#include <asm/uaccess.h>
115
116#include "linux/synclink.h"
117
118#define RCLRVALUE 0xffff
119
120static MGSL_PARAMS default_params = {
121 MGSL_MODE_HDLC, /* unsigned long mode */
122 0, /* unsigned char loopback; */
123 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
124 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
125 0, /* unsigned long clock_speed; */
126 0xff, /* unsigned char addr_filter; */
127 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
128 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
129 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
130 9600, /* unsigned long data_rate; */
131 8, /* unsigned char data_bits; */
132 1, /* unsigned char stop_bits; */
133 ASYNC_PARITY_NONE /* unsigned char parity; */
134};
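/*
 * Sketch of how an application might override these defaults, assuming
 * the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls declared in linux/synclink.h:
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// start from current settings
 *	params.encoding = HDLC_ENCODING_NRZ;
 *	params.clock_speed = 64000;
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);
 */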
135
136#define SHARED_MEM_ADDRESS_SIZE 0x40000
137#define BUFFERLISTSIZE (PAGE_SIZE)
138#define DMABUFFERSIZE (PAGE_SIZE)
139#define MAXRXFRAMES 7
140
141typedef struct _DMABUFFERENTRY
142{
143 u32 phys_addr; /* 32-bit flat physical address of data buffer */
144	volatile u16 count;	/* buffer size/data count */
145 volatile u16 status; /* Control/status field */
146 volatile u16 rcc; /* character count field */
147	u16 reserved;	/* padding required by 16C32 */
148 u32 link; /* 32-bit flat link to next buffer entry */
149 char *virt_addr; /* virtual address of data buffer */
150 u32 phys_entry; /* physical address of this buffer entry */
151} DMABUFFERENTRY, *DMAPBUFFERENTRY;
152
153/* The queue of BH actions to be performed */
154
155#define BH_RECEIVE 1
156#define BH_TRANSMIT 2
157#define BH_STATUS 4
158
159#define IO_PIN_SHUTDOWN_LIMIT 100
160
161#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
162
163struct _input_signal_events {
164 int ri_up;
165 int ri_down;
166 int dsr_up;
167 int dsr_down;
168 int dcd_up;
169 int dcd_down;
170 int cts_up;
171 int cts_down;
172};
173
174/* transmit holding buffer definitions*/
175#define MAX_TX_HOLDING_BUFFERS 5
176struct tx_holding_buffer {
177 int buffer_size;
178 unsigned char * buffer;
179};
180
181
182/*
183 * Device instance data structure
184 */
185
186struct mgsl_struct {
187 int magic;
188 int flags;
189 int count; /* count of opens */
190 int line;
191 int hw_version;
192 unsigned short close_delay;
193 unsigned short closing_wait; /* time to wait before closing */
194
195 struct mgsl_icount icount;
196
197 struct tty_struct *tty;
198 int timeout;
199 int x_char; /* xon/xoff character */
200 int blocked_open; /* # of blocked opens */
201 u16 read_status_mask;
202 u16 ignore_status_mask;
203 unsigned char *xmit_buf;
204 int xmit_head;
205 int xmit_tail;
206 int xmit_cnt;
207
208 wait_queue_head_t open_wait;
209 wait_queue_head_t close_wait;
210
211 wait_queue_head_t status_event_wait_q;
212 wait_queue_head_t event_wait_q;
213 struct timer_list tx_timer; /* HDLC transmit timeout timer */
214 struct mgsl_struct *next_device; /* device list link */
215
216 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
217 struct work_struct task; /* task structure for scheduling bh */
218
219 u32 EventMask; /* event trigger mask */
220 u32 RecordedEvents; /* pending events */
221
222 u32 max_frame_size; /* as set by device config */
223
224 u32 pending_bh;
225
226 int bh_running; /* Protection from multiple */
227 int isr_overflow;
228 int bh_requested;
229
230 int dcd_chkcount; /* check counts to prevent */
231 int cts_chkcount; /* too many IRQs if a signal */
232 int dsr_chkcount; /* is floating */
233 int ri_chkcount;
234
235 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
236 unsigned long buffer_list_phys;
237
238 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
239 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
240 unsigned int current_rx_buffer;
241
242 int num_tx_dma_buffers; /* number of tx dma frames required */
243 int tx_dma_buffers_used;
244 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
245 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
246 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
247 int current_tx_buffer; /* next tx dma buffer to be loaded */
248
249 unsigned char *intermediate_rxbuffer;
250
251	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
252 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
253 int put_tx_holding_index; /* next tx holding buffer to store user request */
254 int tx_holding_count; /* number of tx holding buffers waiting */
255 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
256
257 int rx_enabled;
258 int rx_overflow;
259 int rx_rcc_underrun;
260
261 int tx_enabled;
262 int tx_active;
263 u32 idle_mode;
264
265 u16 cmr_value;
266 u16 tcsr_value;
267
268 char device_name[25]; /* device instance name */
269
270 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
271 unsigned char bus; /* expansion bus number (zero based) */
272 unsigned char function; /* PCI device number */
273
274 unsigned int io_base; /* base I/O address of adapter */
275 unsigned int io_addr_size; /* size of the I/O address range */
276 int io_addr_requested; /* nonzero if I/O address requested */
277
278 unsigned int irq_level; /* interrupt level */
279 unsigned long irq_flags;
280 int irq_requested; /* nonzero if IRQ requested */
281
282 unsigned int dma_level; /* DMA channel */
283 int dma_requested; /* nonzero if dma channel requested */
284
285 u16 mbre_bit;
286 u16 loopback_bits;
287 u16 usc_idle_mode;
288
289 MGSL_PARAMS params; /* communications parameters */
290
291 unsigned char serial_signals; /* current serial signal states */
292
293 int irq_occurred; /* for diagnostics use */
294 unsigned int init_error; /* Initialization startup error (DIAGS) */
295 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
296
297 u32 last_mem_alloc;
298 unsigned char* memory_base; /* shared memory address (PCI only) */
299 u32 phys_memory_base;
300 int shared_mem_requested;
301
302 unsigned char* lcr_base; /* local config registers (PCI only) */
303 u32 phys_lcr_base;
304 u32 lcr_offset;
305 int lcr_mem_requested;
306
307 u32 misc_ctrl_value;
308 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
309 char char_buf[MAX_ASYNC_BUFFER_SIZE];
310 BOOLEAN drop_rts_on_tx_done;
311
312 BOOLEAN loopmode_insert_requested;
313 BOOLEAN loopmode_send_done_requested;
314
315 struct _input_signal_events input_signal_events;
316
317 /* generic HDLC device parts */
318 int netcount;
319 int dosyncppp;
320 spinlock_t netlock;
321
322#ifdef CONFIG_HDLC
323 struct net_device *netdev;
324#endif
325};
326
327#define MGSL_MAGIC 0x5401
328
329/*
330 * The size of the serial xmit buffer is 1 page, or 4096 bytes
331 */
332#ifndef SERIAL_XMIT_SIZE
333#define SERIAL_XMIT_SIZE 4096
334#endif
335
336/*
337 * These macros define the offsets used in calculating the
338 * I/O address of the specified USC registers.
339 */
340
341
342#define DCPIN 2 /* Bit 1 of I/O address */
343#define SDPIN 4 /* Bit 2 of I/O address */
344
345#define DCAR 0 /* DMA command/address register */
346#define CCAR SDPIN /* channel command/address register */
347#define DATAREG DCPIN + SDPIN /* serial data register */
348#define MSBONLY 0x41
349#define LSBONLY 0x40
350
351/*
352 * These macros define the register address (ordinal number)
353 * used for writing address/value pairs to the USC.
354 */
355
356#define CMR 0x02 /* Channel mode Register */
357#define CCSR 0x04 /* Channel Command/status Register */
358#define CCR 0x06 /* Channel Control Register */
359#define PSR 0x08 /* Port status Register */
360#define PCR 0x0a /* Port Control Register */
361#define TMDR 0x0c /* Test mode Data Register */
362#define TMCR 0x0e /* Test mode Control Register */
363#define CMCR 0x10 /* Clock mode Control Register */
364#define HCR 0x12 /* Hardware Configuration Register */
365#define IVR 0x14 /* Interrupt Vector Register */
366#define IOCR 0x16 /* Input/Output Control Register */
367#define ICR 0x18 /* Interrupt Control Register */
368#define DCCR 0x1a /* Daisy Chain Control Register */
369#define MISR 0x1c /* Misc Interrupt status Register */
370#define SICR 0x1e /* status Interrupt Control Register */
371#define RDR 0x20 /* Receive Data Register */
372#define RMR 0x22 /* Receive mode Register */
373#define RCSR 0x24 /* Receive Command/status Register */
374#define RICR 0x26 /* Receive Interrupt Control Register */
375#define RSR 0x28 /* Receive Sync Register */
376#define RCLR 0x2a /* Receive count Limit Register */
377#define RCCR 0x2c /* Receive Character count Register */
378#define TC0R 0x2e /* Time Constant 0 Register */
379#define TDR 0x30 /* Transmit Data Register */
380#define TMR 0x32 /* Transmit mode Register */
381#define TCSR 0x34 /* Transmit Command/status Register */
382#define TICR 0x36 /* Transmit Interrupt Control Register */
383#define TSR 0x38 /* Transmit Sync Register */
384#define TCLR 0x3a /* Transmit count Limit Register */
385#define TCCR 0x3c /* Transmit Character count Register */
386#define TC1R 0x3e /* Time Constant 1 Register */
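/*
 * Rough sketch of how these register ordinals are used (see
 * mgsl_isr_receive_data() later in this file for the real code): the
 * ordinal is written into the Channel Command/Address Register and the
 * data is then transferred through the same I/O location, e.g. reading
 * one byte from the receive FIFO:
 *
 *	outw((inw(info->io_base + CCAR) & 0x0780) | (RDR + LSBONLY),
 *	     info->io_base + CCAR);
 *	data = inb(info->io_base + CCAR);
 */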
387
388
389/*
390 * MACRO DEFINITIONS FOR DMA REGISTERS
391 */
392
393#define DCR 0x06 /* DMA Control Register (shared) */
394#define DACR 0x08 /* DMA Array count Register (shared) */
395#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
396#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
397#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
398#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
399#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
400
401#define TDMR 0x02 /* Transmit DMA mode Register */
402#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
403#define TBCR 0x2a /* Transmit Byte count Register */
404#define TARL 0x2c /* Transmit Address Register (low) */
405#define TARU 0x2e /* Transmit Address Register (high) */
406#define NTBCR 0x3a /* Next Transmit Byte count Register */
407#define NTARL 0x3c /* Next Transmit Address Register (low) */
408#define NTARU 0x3e /* Next Transmit Address Register (high) */
409
410#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
411#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
412#define RBCR 0xaa /* Receive Byte count Register */
413#define RARL 0xac /* Receive Address Register (low) */
414#define RARU 0xae /* Receive Address Register (high) */
415#define NRBCR 0xba /* Next Receive Byte count Register */
416#define NRARL 0xbc /* Next Receive Address Register (low) */
417#define NRARU 0xbe /* Next Receive Address Register (high) */
418
419
420/*
421 * MACRO DEFINITIONS FOR MODEM STATUS BITS
422 */
423
424#define MODEMSTATUS_DTR 0x80
425#define MODEMSTATUS_DSR 0x40
426#define MODEMSTATUS_RTS 0x20
427#define MODEMSTATUS_CTS 0x10
428#define MODEMSTATUS_RI 0x04
429#define MODEMSTATUS_DCD 0x01
430
431
432/*
433 * Channel Command/Address Register (CCAR) Command Codes
434 */
435
436#define RTCmd_Null 0x0000
437#define RTCmd_ResetHighestIus 0x1000
438#define RTCmd_TriggerChannelLoadDma 0x2000
439#define RTCmd_TriggerRxDma 0x2800
440#define RTCmd_TriggerTxDma 0x3000
441#define RTCmd_TriggerRxAndTxDma 0x3800
442#define RTCmd_PurgeRxFifo 0x4800
443#define RTCmd_PurgeTxFifo 0x5000
444#define RTCmd_PurgeRxAndTxFifo 0x5800
445#define RTCmd_LoadRcc 0x6800
446#define RTCmd_LoadTcc 0x7000
447#define RTCmd_LoadRccAndTcc 0x7800
448#define RTCmd_LoadTC0 0x8800
449#define RTCmd_LoadTC1 0x9000
450#define RTCmd_LoadTC0AndTC1 0x9800
451#define RTCmd_SerialDataLSBFirst 0xa000
452#define RTCmd_SerialDataMSBFirst 0xa800
453#define RTCmd_SelectBigEndian 0xb000
454#define RTCmd_SelectLittleEndian 0xb800
455
456
457/*
458 * DMA Command/Address Register (DCAR) Command Codes
459 */
460
461#define DmaCmd_Null 0x0000
462#define DmaCmd_ResetTxChannel 0x1000
463#define DmaCmd_ResetRxChannel 0x1200
464#define DmaCmd_StartTxChannel 0x2000
465#define DmaCmd_StartRxChannel 0x2200
466#define DmaCmd_ContinueTxChannel 0x3000
467#define DmaCmd_ContinueRxChannel 0x3200
468#define DmaCmd_PauseTxChannel 0x4000
469#define DmaCmd_PauseRxChannel 0x4200
470#define DmaCmd_AbortTxChannel 0x5000
471#define DmaCmd_AbortRxChannel 0x5200
472#define DmaCmd_InitTxChannel 0x7000
473#define DmaCmd_InitRxChannel 0x7200
474#define DmaCmd_ResetHighestDmaIus 0x8000
475#define DmaCmd_ResetAllChannels 0x9000
476#define DmaCmd_StartAllChannels 0xa000
477#define DmaCmd_ContinueAllChannels 0xb000
478#define DmaCmd_PauseAllChannels 0xc000
479#define DmaCmd_AbortAllChannels 0xd000
480#define DmaCmd_InitAllChannels 0xf000
481
482#define TCmd_Null 0x0000
483#define TCmd_ClearTxCRC 0x2000
484#define TCmd_SelectTicrTtsaData 0x4000
485#define TCmd_SelectTicrTxFifostatus 0x5000
486#define TCmd_SelectTicrIntLevel 0x6000
487#define TCmd_SelectTicrdma_level 0x7000
488#define TCmd_SendFrame 0x8000
489#define TCmd_SendAbort 0x9000
490#define TCmd_EnableDleInsertion 0xc000
491#define TCmd_DisableDleInsertion 0xd000
492#define TCmd_ClearEofEom 0xe000
493#define TCmd_SetEofEom 0xf000
494
495#define RCmd_Null 0x0000
496#define RCmd_ClearRxCRC 0x2000
497#define RCmd_EnterHuntmode 0x3000
498#define RCmd_SelectRicrRtsaData 0x4000
499#define RCmd_SelectRicrRxFifostatus 0x5000
500#define RCmd_SelectRicrIntLevel 0x6000
501#define RCmd_SelectRicrdma_level 0x7000
502
503/*
504 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
505 */
506
507#define RECEIVE_STATUS BIT5
508#define RECEIVE_DATA BIT4
509#define TRANSMIT_STATUS BIT3
510#define TRANSMIT_DATA BIT2
511#define IO_PIN BIT1
512#define MISC BIT0
513
514
515/*
516 * Receive status Bits in Receive Command/status Register RCSR
517 */
518
519#define RXSTATUS_SHORT_FRAME BIT8
520#define RXSTATUS_CODE_VIOLATION BIT8
521#define RXSTATUS_EXITED_HUNT BIT7
522#define RXSTATUS_IDLE_RECEIVED BIT6
523#define RXSTATUS_BREAK_RECEIVED BIT5
524#define RXSTATUS_ABORT_RECEIVED BIT5
525#define RXSTATUS_RXBOUND BIT4
526#define RXSTATUS_CRC_ERROR BIT3
527#define RXSTATUS_FRAMING_ERROR BIT3
528#define RXSTATUS_ABORT BIT2
529#define RXSTATUS_PARITY_ERROR BIT2
530#define RXSTATUS_OVERRUN BIT1
531#define RXSTATUS_DATA_AVAILABLE BIT0
532#define RXSTATUS_ALL 0x01f6
533#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
534
535/*
536 * Values for setting transmit idle mode in
537 * Transmit Control/status Register (TCSR)
538 */
539#define IDLEMODE_FLAGS 0x0000
540#define IDLEMODE_ALT_ONE_ZERO 0x0100
541#define IDLEMODE_ZERO 0x0200
542#define IDLEMODE_ONE 0x0300
543#define IDLEMODE_ALT_MARK_SPACE 0x0500
544#define IDLEMODE_SPACE 0x0600
545#define IDLEMODE_MARK 0x0700
546#define IDLEMODE_MASK 0x0700
547
548/*
549 * IUSC revision identifiers
550 */
551#define IUSC_SL1660 0x4d44
552#define IUSC_PRE_SL1660 0x4553
553
554/*
555 * Transmit status Bits in Transmit Command/status Register (TCSR)
556 */
557
558#define TCSR_PRESERVE 0x0F00
559
560#define TCSR_UNDERWAIT BIT11
561#define TXSTATUS_PREAMBLE_SENT BIT7
562#define TXSTATUS_IDLE_SENT BIT6
563#define TXSTATUS_ABORT_SENT BIT5
564#define TXSTATUS_EOF_SENT BIT4
565#define TXSTATUS_EOM_SENT BIT4
566#define TXSTATUS_CRC_SENT BIT3
567#define TXSTATUS_ALL_SENT BIT2
568#define TXSTATUS_UNDERRUN BIT1
569#define TXSTATUS_FIFO_EMPTY BIT0
570#define TXSTATUS_ALL 0x00fa
571#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
572
573
574#define MISCSTATUS_RXC_LATCHED BIT15
575#define MISCSTATUS_RXC BIT14
576#define MISCSTATUS_TXC_LATCHED BIT13
577#define MISCSTATUS_TXC BIT12
578#define MISCSTATUS_RI_LATCHED BIT11
579#define MISCSTATUS_RI BIT10
580#define MISCSTATUS_DSR_LATCHED BIT9
581#define MISCSTATUS_DSR BIT8
582#define MISCSTATUS_DCD_LATCHED BIT7
583#define MISCSTATUS_DCD BIT6
584#define MISCSTATUS_CTS_LATCHED BIT5
585#define MISCSTATUS_CTS BIT4
586#define MISCSTATUS_RCC_UNDERRUN BIT3
587#define MISCSTATUS_DPLL_NO_SYNC BIT2
588#define MISCSTATUS_BRG1_ZERO BIT1
589#define MISCSTATUS_BRG0_ZERO BIT0
590
591#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
592#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
593
594#define SICR_RXC_ACTIVE BIT15
595#define SICR_RXC_INACTIVE BIT14
596#define SICR_RXC (BIT15+BIT14)
597#define SICR_TXC_ACTIVE BIT13
598#define SICR_TXC_INACTIVE BIT12
599#define SICR_TXC (BIT13+BIT12)
600#define SICR_RI_ACTIVE BIT11
601#define SICR_RI_INACTIVE BIT10
602#define SICR_RI (BIT11+BIT10)
603#define SICR_DSR_ACTIVE BIT9
604#define SICR_DSR_INACTIVE BIT8
605#define SICR_DSR (BIT9+BIT8)
606#define SICR_DCD_ACTIVE BIT7
607#define SICR_DCD_INACTIVE BIT6
608#define SICR_DCD (BIT7+BIT6)
609#define SICR_CTS_ACTIVE BIT5
610#define SICR_CTS_INACTIVE BIT4
611#define SICR_CTS (BIT5+BIT4)
612#define SICR_RCC_UNDERFLOW BIT3
613#define SICR_DPLL_NO_SYNC BIT2
614#define SICR_BRG1_ZERO BIT1
615#define SICR_BRG0_ZERO BIT0
616
617void usc_DisableMasterIrqBit( struct mgsl_struct *info );
618void usc_EnableMasterIrqBit( struct mgsl_struct *info );
619void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
620void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
621void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
622
623#define usc_EnableInterrupts( a, b ) \
624 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
625
626#define usc_DisableInterrupts( a, b ) \
627 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
628
629#define usc_EnableMasterIrqBit(a) \
630 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
631
632#define usc_DisableMasterIrqBit(a) \
633 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
634
635#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
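/*
 * Usage sketch: the ISR paths pair these helpers.  For example,
 * mgsl_isr_misc() below shuts off receive interrupts with
 *
 *	usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
 *	usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
 *
 * and the matching usc_EnableInterrupts() call is made when the
 * receiver is started again.
 */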
636
637/*
638 * Transmit status Bits in Transmit Control status Register (TCSR)
639 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
640 */
641
642#define TXSTATUS_PREAMBLE_SENT BIT7
643#define TXSTATUS_IDLE_SENT BIT6
644#define TXSTATUS_ABORT_SENT BIT5
645#define TXSTATUS_EOF BIT4
646#define TXSTATUS_CRC_SENT BIT3
647#define TXSTATUS_ALL_SENT BIT2
648#define TXSTATUS_UNDERRUN BIT1
649#define TXSTATUS_FIFO_EMPTY BIT0
650
651#define DICR_MASTER BIT15
652#define DICR_TRANSMIT BIT0
653#define DICR_RECEIVE BIT1
654
655#define usc_EnableDmaInterrupts(a,b) \
656 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
657
658#define usc_DisableDmaInterrupts(a,b) \
659 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
660
661#define usc_EnableStatusIrqs(a,b) \
662 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
663
664#define usc_DisablestatusIrqs(a,b) \
665 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
666
667/* Transmit status Bits in Transmit Control status Register (TCSR) */
668/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
669
670
671#define DISABLE_UNCONDITIONAL 0
672#define DISABLE_END_OF_FRAME 1
673#define ENABLE_UNCONDITIONAL 2
674#define ENABLE_AUTO_CTS 3
675#define ENABLE_AUTO_DCD 3
676#define usc_EnableTransmitter(a,b) \
677 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
678#define usc_EnableReceiver(a,b) \
679 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
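/*
 * Despite their names, these two helpers both enable and disable the
 * transmitter/receiver depending on the mode argument.  Example taken
 * from mgsl_isr_misc() below:
 *
 *	usc_EnableReceiver(info, DISABLE_UNCONDITIONAL);
 */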
680
681static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
682static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
683static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
684
685static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
686static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
687static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
688void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
689void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
690
691#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
692#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
693
694#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
695
696static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
697static void usc_start_receiver( struct mgsl_struct *info );
698static void usc_stop_receiver( struct mgsl_struct *info );
699
700static void usc_start_transmitter( struct mgsl_struct *info );
701static void usc_stop_transmitter( struct mgsl_struct *info );
702static void usc_set_txidle( struct mgsl_struct *info );
703static void usc_load_txfifo( struct mgsl_struct *info );
704
705static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
706static void usc_enable_loopback( struct mgsl_struct *info, int enable );
707
708static void usc_get_serial_signals( struct mgsl_struct *info );
709static void usc_set_serial_signals( struct mgsl_struct *info );
710
711static void usc_reset( struct mgsl_struct *info );
712
713static void usc_set_sync_mode( struct mgsl_struct *info );
714static void usc_set_sdlc_mode( struct mgsl_struct *info );
715static void usc_set_async_mode( struct mgsl_struct *info );
716static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
717
718static void usc_loopback_frame( struct mgsl_struct *info );
719
720static void mgsl_tx_timeout(unsigned long context);
721
722
723static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
724static void usc_loopmode_insert_request( struct mgsl_struct * info );
725static int usc_loopmode_active( struct mgsl_struct * info);
726static void usc_loopmode_send_done( struct mgsl_struct * info );
727
728static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
729
730#ifdef CONFIG_HDLC
731#define dev_to_port(D) (dev_to_hdlc(D)->priv)
732static void hdlcdev_tx_done(struct mgsl_struct *info);
733static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
734static int hdlcdev_init(struct mgsl_struct *info);
735static void hdlcdev_exit(struct mgsl_struct *info);
736#endif
737
738/*
739 * Defines a BUS descriptor value for the PCI adapter
740 * local bus address ranges.
741 */
742
743#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
744(0x00400020 + \
745((WrHold) << 30) + \
746((WrDly) << 28) + \
747((RdDly) << 26) + \
748((Nwdd) << 20) + \
749((Nwad) << 15) + \
750((Nxda) << 13) + \
751((Nrdd) << 11) + \
752((Nrad) << 6) )
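/*
 * Example (values are illustrative only): a descriptor with a write
 * hold time of 1 and all other fields zero packs as
 *
 *	BUS_DESCRIPTOR(1, 0, 0, 0, 0, 0, 0, 0)
 *		= 0x00400020 + (1 << 30)
 *		= 0x40400020
 */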
753
754static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
755
756/*
757 * Adapter diagnostic routines
758 */
759static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
760static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
761static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
762static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
763static int mgsl_adapter_test( struct mgsl_struct *info );
764
765/*
766 * device and resource management routines
767 */
768static int mgsl_claim_resources(struct mgsl_struct *info);
769static void mgsl_release_resources(struct mgsl_struct *info);
770static void mgsl_add_device(struct mgsl_struct *info);
771static struct mgsl_struct* mgsl_allocate_device(void);
772
773/*
774 * DMA buffer manupulation functions.
775 */
776static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
777static int mgsl_get_rx_frame( struct mgsl_struct *info );
778static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
779static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
780static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
781static int num_free_tx_dma_buffers(struct mgsl_struct *info);
782static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
783static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
784
785/*
786 * DMA and Shared Memory buffer allocation and formatting
787 */
788static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
789static void mgsl_free_dma_buffers(struct mgsl_struct *info);
790static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
791static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
792static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
793static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
794static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
795static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
796static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
797static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
798static int load_next_tx_holding_buffer(struct mgsl_struct *info);
799static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
800
801/*
802 * Bottom half interrupt handlers
803 */
804static void mgsl_bh_handler(void* Context);
805static void mgsl_bh_receive(struct mgsl_struct *info);
806static void mgsl_bh_transmit(struct mgsl_struct *info);
807static void mgsl_bh_status(struct mgsl_struct *info);
808
809/*
810 * Interrupt handler routines and dispatch table.
811 */
812static void mgsl_isr_null( struct mgsl_struct *info );
813static void mgsl_isr_transmit_data( struct mgsl_struct *info );
814static void mgsl_isr_receive_data( struct mgsl_struct *info );
815static void mgsl_isr_receive_status( struct mgsl_struct *info );
816static void mgsl_isr_transmit_status( struct mgsl_struct *info );
817static void mgsl_isr_io_pin( struct mgsl_struct *info );
818static void mgsl_isr_misc( struct mgsl_struct *info );
819static void mgsl_isr_receive_dma( struct mgsl_struct *info );
820static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
821
822typedef void (*isr_dispatch_func)(struct mgsl_struct *);
823
824static isr_dispatch_func UscIsrTable[7] =
825{
826 mgsl_isr_null,
827 mgsl_isr_misc,
828 mgsl_isr_io_pin,
829 mgsl_isr_transmit_data,
830 mgsl_isr_transmit_status,
831 mgsl_isr_receive_data,
832 mgsl_isr_receive_status
833};
834
835/*
836 * ioctl call handlers
837 */
838static int tiocmget(struct tty_struct *tty, struct file *file);
839static int tiocmset(struct tty_struct *tty, struct file *file,
840 unsigned int set, unsigned int clear);
841static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
842 __user *user_icount);
843static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
844static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
845static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
846static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
847static int mgsl_txenable(struct mgsl_struct * info, int enable);
848static int mgsl_txabort(struct mgsl_struct * info);
849static int mgsl_rxenable(struct mgsl_struct * info, int enable);
850static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
851static int mgsl_loopmode_send_done( struct mgsl_struct * info );
852
853/* set non-zero on successful registration with PCI subsystem */
854static int pci_registered;
855
856/*
857 * Global linked list of SyncLink devices
858 */
859static struct mgsl_struct *mgsl_device_list;
860static int mgsl_device_count;
861
862/*
863 * Set this param to non-zero to load eax with the
864 * .text section address and breakpoint on module load.
865 * This is useful for use with gdb and add-symbol-file command.
866 */
867static int break_on_load;
868
869/*
870 * Driver major number. Defaults to zero to get an auto-assigned
871 * major number. May be forced as a module parameter.
872 */
873static int ttymajor;
874
875/*
876 * Array of user specified options for ISA adapters.
877 */
878static int io[MAX_ISA_DEVICES];
879static int irq[MAX_ISA_DEVICES];
880static int dma[MAX_ISA_DEVICES];
881static int debug_level;
882static int maxframe[MAX_TOTAL_DEVICES];
883static int dosyncppp[MAX_TOTAL_DEVICES];
884static int txdmabufs[MAX_TOTAL_DEVICES];
885static int txholdbufs[MAX_TOTAL_DEVICES];
886
887module_param(break_on_load, bool, 0);
888module_param(ttymajor, int, 0);
889module_param_array(io, int, NULL, 0);
890module_param_array(irq, int, NULL, 0);
891module_param_array(dma, int, NULL, 0);
892module_param(debug_level, int, 0);
893module_param_array(maxframe, int, NULL, 0);
894module_param_array(dosyncppp, int, NULL, 0);
895module_param_array(txdmabufs, int, NULL, 0);
896module_param_array(txholdbufs, int, NULL, 0);
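/*
 * Example module load (illustrative values): two ISA adapters with
 * explicit resources and a larger max frame size for the first device:
 *
 *	modprobe synclink io=0x280,0x300 irq=10,11 dma=7,5 maxframe=8192
 */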
897
898static char *driver_name = "SyncLink serial driver";
899static char *driver_version = "$Revision: 4.37 $";
900
901static int synclink_init_one (struct pci_dev *dev,
902 const struct pci_device_id *ent);
903static void synclink_remove_one (struct pci_dev *dev);
904
905static struct pci_device_id synclink_pci_tbl[] = {
906 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
907 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
908 { 0, }, /* terminate list */
909};
910MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
911
912MODULE_LICENSE("GPL");
913
914static struct pci_driver synclink_pci_driver = {
915 .name = "synclink",
916 .id_table = synclink_pci_tbl,
917 .probe = synclink_init_one,
918 .remove = __devexit_p(synclink_remove_one),
919};
920
921static struct tty_driver *serial_driver;
922
923/* number of characters left in xmit buffer before we ask for more */
924#define WAKEUP_CHARS 256
925
926
927static void mgsl_change_params(struct mgsl_struct *info);
928static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
929
930/*
931 * 1st function defined in .text section. Calling this function in
932 * init_module() followed by a breakpoint allows a remote debugger
933 * (gdb) to get the .text address for the add-symbol-file command.
934 * This allows remote debugging of dynamically loadable modules.
935 */
936static void* mgsl_get_text_ptr(void)
937{
938 return mgsl_get_text_ptr;
939}
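/*
 * Sketch of the debug procedure described above (commands are
 * illustrative):
 *
 *	target$ modprobe synclink break_on_load=1
 *	(gdb) add-symbol-file synclink.o <address reported for .text>
 *
 * Note that BREAKPOINT() only expands to "int $3" on i386 builds; on
 * other architectures it is an empty statement (see the top of this file).
 */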
940
941/*
942 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
943 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
944 * and some other program tries to do a serial write at the same time.
945 * Since the lock will only come under contention when the system is
946 * swapping and available memory is low, it makes sense to share one
947 * buffer across all the serial ports, since it significantly saves
948 * memory if large numbers of serial ports are open.
949 */
950static unsigned char *tmp_buf;
951static DECLARE_MUTEX(tmp_buf_sem);
952
953static inline int mgsl_paranoia_check(struct mgsl_struct *info,
954 char *name, const char *routine)
955{
956#ifdef MGSL_PARANOIA_CHECK
957 static const char *badmagic =
958 "Warning: bad magic number for mgsl struct (%s) in %s\n";
959 static const char *badinfo =
960 "Warning: null mgsl_struct for (%s) in %s\n";
961
962 if (!info) {
963 printk(badinfo, name, routine);
964 return 1;
965 }
966 if (info->magic != MGSL_MAGIC) {
967 printk(badmagic, name, routine);
968 return 1;
969 }
970#else
971 if (!info)
972 return 1;
973#endif
974 return 0;
975}
976
977/**
978 * line discipline callback wrappers
979 *
980 * The wrappers maintain line discipline references
981 * while calling into the line discipline.
982 *
983 * ldisc_receive_buf - pass receive data to line discipline
984 */
985
986static void ldisc_receive_buf(struct tty_struct *tty,
987 const __u8 *data, char *flags, int count)
988{
989 struct tty_ldisc *ld;
990 if (!tty)
991 return;
992 ld = tty_ldisc_ref(tty);
993 if (ld) {
994 if (ld->receive_buf)
995 ld->receive_buf(tty, data, flags, count);
996 tty_ldisc_deref(ld);
997 }
998}
999
1000/* mgsl_stop() throttle (stop) transmitter
1001 *
1002 * Arguments: tty pointer to tty info structure
1003 * Return Value: None
1004 */
1005static void mgsl_stop(struct tty_struct *tty)
1006{
1007 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1008 unsigned long flags;
1009
1010 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1011 return;
1012
1013 if ( debug_level >= DEBUG_LEVEL_INFO )
1014 printk("mgsl_stop(%s)\n",info->device_name);
1015
1016 spin_lock_irqsave(&info->irq_spinlock,flags);
1017 if (info->tx_enabled)
1018 usc_stop_transmitter(info);
1019 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1020
1021} /* end of mgsl_stop() */
1022
1023/* mgsl_start() release (start) transmitter
1024 *
1025 * Arguments: tty pointer to tty info structure
1026 * Return Value: None
1027 */
1028static void mgsl_start(struct tty_struct *tty)
1029{
1030 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1031 unsigned long flags;
1032
1033 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1034 return;
1035
1036 if ( debug_level >= DEBUG_LEVEL_INFO )
1037 printk("mgsl_start(%s)\n",info->device_name);
1038
1039 spin_lock_irqsave(&info->irq_spinlock,flags);
1040 if (!info->tx_enabled)
1041 usc_start_transmitter(info);
1042 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1043
1044} /* end of mgsl_start() */
1045
1046/*
1047 * Bottom half work queue access functions
1048 */
1049
1050/* mgsl_bh_action() Return next bottom half action to perform.
1051 * Return Value: BH action code or 0 if nothing to do.
1052 */
1053static int mgsl_bh_action(struct mgsl_struct *info)
1054{
1055 unsigned long flags;
1056 int rc = 0;
1057
1058 spin_lock_irqsave(&info->irq_spinlock,flags);
1059
1060 if (info->pending_bh & BH_RECEIVE) {
1061 info->pending_bh &= ~BH_RECEIVE;
1062 rc = BH_RECEIVE;
1063 } else if (info->pending_bh & BH_TRANSMIT) {
1064 info->pending_bh &= ~BH_TRANSMIT;
1065 rc = BH_TRANSMIT;
1066 } else if (info->pending_bh & BH_STATUS) {
1067 info->pending_bh &= ~BH_STATUS;
1068 rc = BH_STATUS;
1069 }
1070
1071 if (!rc) {
1072 /* Mark BH routine as complete */
1073 info->bh_running = 0;
1074 info->bh_requested = 0;
1075 }
1076
1077 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1078
1079 return rc;
1080}
1081
1082/*
1083 * Perform bottom half processing of work items queued by ISR.
1084 */
1085static void mgsl_bh_handler(void* Context)
1086{
1087 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1088 int action;
1089
1090 if (!info)
1091 return;
1092
1093 if ( debug_level >= DEBUG_LEVEL_BH )
1094 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1095 __FILE__,__LINE__,info->device_name);
1096
1097 info->bh_running = 1;
1098
1099 while((action = mgsl_bh_action(info)) != 0) {
1100
1101 /* Process work item */
1102 if ( debug_level >= DEBUG_LEVEL_BH )
1103 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1104 __FILE__,__LINE__,action);
1105
1106 switch (action) {
1107
1108 case BH_RECEIVE:
1109 mgsl_bh_receive(info);
1110 break;
1111 case BH_TRANSMIT:
1112 mgsl_bh_transmit(info);
1113 break;
1114 case BH_STATUS:
1115 mgsl_bh_status(info);
1116 break;
1117 default:
1118 /* unknown work item ID */
1119 printk("Unknown work item ID=%08X!\n", action);
1120 break;
1121 }
1122 }
1123
1124 if ( debug_level >= DEBUG_LEVEL_BH )
1125 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1126 __FILE__,__LINE__,info->device_name);
1127}
1128
1129static void mgsl_bh_receive(struct mgsl_struct *info)
1130{
1131 int (*get_rx_frame)(struct mgsl_struct *info) =
1132 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1133
1134 if ( debug_level >= DEBUG_LEVEL_BH )
1135 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1136 __FILE__,__LINE__,info->device_name);
1137
1138 do
1139 {
1140 if (info->rx_rcc_underrun) {
1141 unsigned long flags;
1142 spin_lock_irqsave(&info->irq_spinlock,flags);
1143 usc_start_receiver(info);
1144 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1145 return;
1146 }
1147 } while(get_rx_frame(info));
1148}
1149
1150static void mgsl_bh_transmit(struct mgsl_struct *info)
1151{
1152 struct tty_struct *tty = info->tty;
1153 unsigned long flags;
1154
1155 if ( debug_level >= DEBUG_LEVEL_BH )
1156 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1157 __FILE__,__LINE__,info->device_name);
1158
1159 if (tty) {
1160 tty_wakeup(tty);
1161 wake_up_interruptible(&tty->write_wait);
1162 }
1163
1164 /* if transmitter idle and loopmode_send_done_requested
1165 * then start echoing RxD to TxD
1166 */
1167 spin_lock_irqsave(&info->irq_spinlock,flags);
1168 if ( !info->tx_active && info->loopmode_send_done_requested )
1169 usc_loopmode_send_done( info );
1170 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1171}
1172
1173static void mgsl_bh_status(struct mgsl_struct *info)
1174{
1175 if ( debug_level >= DEBUG_LEVEL_BH )
1176 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1177 __FILE__,__LINE__,info->device_name);
1178
1179 info->ri_chkcount = 0;
1180 info->dsr_chkcount = 0;
1181 info->dcd_chkcount = 0;
1182 info->cts_chkcount = 0;
1183}
1184
1185/* mgsl_isr_receive_status()
1186 *
1187 * Service a receive status interrupt. The type of status
1188 * interrupt is indicated by the state of the RCSR.
1189 * This is only used for HDLC mode.
1190 *
1191 * Arguments: info pointer to device instance data
1192 * Return Value: None
1193 */
1194static void mgsl_isr_receive_status( struct mgsl_struct *info )
1195{
1196 u16 status = usc_InReg( info, RCSR );
1197
1198 if ( debug_level >= DEBUG_LEVEL_ISR )
1199 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1200 __FILE__,__LINE__,status);
1201
1202 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1203 info->loopmode_insert_requested &&
1204 usc_loopmode_active(info) )
1205 {
1206 ++info->icount.rxabort;
1207 info->loopmode_insert_requested = FALSE;
1208
1209 /* clear CMR:13 to start echoing RxD to TxD */
1210 info->cmr_value &= ~BIT13;
1211 usc_OutReg(info, CMR, info->cmr_value);
1212
1213 /* disable received abort irq (no longer required) */
1214 usc_OutReg(info, RICR,
1215 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1216 }
1217
1218 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1219 if (status & RXSTATUS_EXITED_HUNT)
1220 info->icount.exithunt++;
1221 if (status & RXSTATUS_IDLE_RECEIVED)
1222 info->icount.rxidle++;
1223 wake_up_interruptible(&info->event_wait_q);
1224 }
1225
1226 if (status & RXSTATUS_OVERRUN){
1227 info->icount.rxover++;
1228 usc_process_rxoverrun_sync( info );
1229 }
1230
1231 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1232 usc_UnlatchRxstatusBits( info, status );
1233
1234} /* end of mgsl_isr_receive_status() */
1235
1236/* mgsl_isr_transmit_status()
1237 *
1238 * Service a transmit status interrupt
1239 * HDLC mode: end of transmit frame
1240 * Async mode: all data is sent
1241 * Transmit status is indicated by bits in the TCSR.
1242 *
1243 * Arguments: info pointer to device instance data
1244 * Return Value: None
1245 */
1246static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1247{
1248 u16 status = usc_InReg( info, TCSR );
1249
1250 if ( debug_level >= DEBUG_LEVEL_ISR )
1251 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1252 __FILE__,__LINE__,status);
1253
1254 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1255 usc_UnlatchTxstatusBits( info, status );
1256
1257 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1258 {
1259 /* finished sending HDLC abort. This may leave */
1260 /* the TxFifo with data from the aborted frame */
1261	/* so purge the TxFifo. Also shut down the DMA  */
1262 /* channel in case there is data remaining in */
1263 /* the DMA buffer */
1264 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1265 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1266 }
1267
1268 if ( status & TXSTATUS_EOF_SENT )
1269 info->icount.txok++;
1270 else if ( status & TXSTATUS_UNDERRUN )
1271 info->icount.txunder++;
1272 else if ( status & TXSTATUS_ABORT_SENT )
1273 info->icount.txabort++;
1274 else
1275 info->icount.txunder++;
1276
1277 info->tx_active = 0;
1278 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1279 del_timer(&info->tx_timer);
1280
1281 if ( info->drop_rts_on_tx_done ) {
1282 usc_get_serial_signals( info );
1283 if ( info->serial_signals & SerialSignal_RTS ) {
1284 info->serial_signals &= ~SerialSignal_RTS;
1285 usc_set_serial_signals( info );
1286 }
1287 info->drop_rts_on_tx_done = 0;
1288 }
1289
1290#ifdef CONFIG_HDLC
1291 if (info->netcount)
1292 hdlcdev_tx_done(info);
1293 else
1294#endif
1295 {
1296 if (info->tty->stopped || info->tty->hw_stopped) {
1297 usc_stop_transmitter(info);
1298 return;
1299 }
1300 info->pending_bh |= BH_TRANSMIT;
1301 }
1302
1303} /* end of mgsl_isr_transmit_status() */
1304
1305/* mgsl_isr_io_pin()
1306 *
1307 * Service an Input/Output pin interrupt. The type of
1308 * interrupt is indicated by bits in the MISR
1309 *
1310 * Arguments: info pointer to device instance data
1311 * Return Value: None
1312 */
1313static void mgsl_isr_io_pin( struct mgsl_struct *info )
1314{
1315 struct mgsl_icount *icount;
1316 u16 status = usc_InReg( info, MISR );
1317
1318 if ( debug_level >= DEBUG_LEVEL_ISR )
1319 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1320 __FILE__,__LINE__,status);
1321
1322 usc_ClearIrqPendingBits( info, IO_PIN );
1323 usc_UnlatchIostatusBits( info, status );
1324
1325 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1326 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1327 icount = &info->icount;
1328 /* update input line counters */
1329 if (status & MISCSTATUS_RI_LATCHED) {
1330 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1331 usc_DisablestatusIrqs(info,SICR_RI);
1332 icount->rng++;
1333 if ( status & MISCSTATUS_RI )
1334 info->input_signal_events.ri_up++;
1335 else
1336 info->input_signal_events.ri_down++;
1337 }
1338 if (status & MISCSTATUS_DSR_LATCHED) {
1339 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1340 usc_DisablestatusIrqs(info,SICR_DSR);
1341 icount->dsr++;
1342 if ( status & MISCSTATUS_DSR )
1343 info->input_signal_events.dsr_up++;
1344 else
1345 info->input_signal_events.dsr_down++;
1346 }
1347 if (status & MISCSTATUS_DCD_LATCHED) {
1348 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1349 usc_DisablestatusIrqs(info,SICR_DCD);
1350 icount->dcd++;
1351 if (status & MISCSTATUS_DCD) {
1352 info->input_signal_events.dcd_up++;
1353 } else
1354 info->input_signal_events.dcd_down++;
1355#ifdef CONFIG_HDLC
1356 if (info->netcount)
1357 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
1358#endif
1359 }
1360 if (status & MISCSTATUS_CTS_LATCHED)
1361 {
1362 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1363 usc_DisablestatusIrqs(info,SICR_CTS);
1364 icount->cts++;
1365 if ( status & MISCSTATUS_CTS )
1366 info->input_signal_events.cts_up++;
1367 else
1368 info->input_signal_events.cts_down++;
1369 }
1370 wake_up_interruptible(&info->status_event_wait_q);
1371 wake_up_interruptible(&info->event_wait_q);
1372
1373 if ( (info->flags & ASYNC_CHECK_CD) &&
1374 (status & MISCSTATUS_DCD_LATCHED) ) {
1375 if ( debug_level >= DEBUG_LEVEL_ISR )
1376 printk("%s CD now %s...", info->device_name,
1377 (status & MISCSTATUS_DCD) ? "on" : "off");
1378 if (status & MISCSTATUS_DCD)
1379 wake_up_interruptible(&info->open_wait);
1380 else {
1381 if ( debug_level >= DEBUG_LEVEL_ISR )
1382 printk("doing serial hangup...");
1383 if (info->tty)
1384 tty_hangup(info->tty);
1385 }
1386 }
1387
1388 if ( (info->flags & ASYNC_CTS_FLOW) &&
1389 (status & MISCSTATUS_CTS_LATCHED) ) {
1390 if (info->tty->hw_stopped) {
1391 if (status & MISCSTATUS_CTS) {
1392 if ( debug_level >= DEBUG_LEVEL_ISR )
1393 printk("CTS tx start...");
1394 if (info->tty)
1395 info->tty->hw_stopped = 0;
1396 usc_start_transmitter(info);
1397 info->pending_bh |= BH_TRANSMIT;
1398 return;
1399 }
1400 } else {
1401 if (!(status & MISCSTATUS_CTS)) {
1402 if ( debug_level >= DEBUG_LEVEL_ISR )
1403 printk("CTS tx stop...");
1404 if (info->tty)
1405 info->tty->hw_stopped = 1;
1406 usc_stop_transmitter(info);
1407 }
1408 }
1409 }
1410 }
1411
1412 info->pending_bh |= BH_STATUS;
1413
1414 /* for diagnostics set IRQ flag */
1415 if ( status & MISCSTATUS_TXC_LATCHED ){
1416 usc_OutReg( info, SICR,
1417 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1418 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1419 info->irq_occurred = 1;
1420 }
1421
1422} /* end of mgsl_isr_io_pin() */
1423
1424/* mgsl_isr_transmit_data()
1425 *
1426 * Service a transmit data interrupt (async mode only).
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
1431static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1432{
1433 if ( debug_level >= DEBUG_LEVEL_ISR )
1434 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1435 __FILE__,__LINE__,info->xmit_cnt);
1436
1437 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1438
1439 if (info->tty->stopped || info->tty->hw_stopped) {
1440 usc_stop_transmitter(info);
1441 return;
1442 }
1443
1444 if ( info->xmit_cnt )
1445 usc_load_txfifo( info );
1446 else
1447 info->tx_active = 0;
1448
1449 if (info->xmit_cnt < WAKEUP_CHARS)
1450 info->pending_bh |= BH_TRANSMIT;
1451
1452} /* end of mgsl_isr_transmit_data() */
1453
1454/* mgsl_isr_receive_data()
1455 *
1456 * Service a receive data interrupt. This occurs
1457 * when operating in asynchronous interrupt transfer mode.
1458 * The receive data FIFO is flushed to the receive data buffers.
1459 *
1460 * Arguments: info pointer to device instance data
1461 * Return Value: None
1462 */
1463static void mgsl_isr_receive_data( struct mgsl_struct *info )
1464{
1465 int Fifocount;
1466 u16 status;
1467 unsigned char DataByte;
1468 struct tty_struct *tty = info->tty;
1469 struct mgsl_icount *icount = &info->icount;
1470
1471 if ( debug_level >= DEBUG_LEVEL_ISR )
1472 printk("%s(%d):mgsl_isr_receive_data\n",
1473 __FILE__,__LINE__);
1474
1475 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1476
1477 /* select FIFO status for RICR readback */
1478 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1479
1480 /* clear the Wordstatus bit so that status readback */
1481 /* only reflects the status of this byte */
1482 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1483
1484 /* flush the receive FIFO */
1485
1486 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1487 /* read one byte from RxFIFO */
1488 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1489 info->io_base + CCAR );
1490 DataByte = inb( info->io_base + CCAR );
1491
1492 /* get the status of the received byte */
1493 status = usc_InReg(info, RCSR);
1494 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1495 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1496 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1497
1498 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
1499 continue;
1500
1501 *tty->flip.char_buf_ptr = DataByte;
1502 icount->rx++;
1503
1504 *tty->flip.flag_buf_ptr = 0;
1505 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1506 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1507 printk("rxerr=%04X\n",status);
1508 /* update error statistics */
1509 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1510 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1511 icount->brk++;
1512 } else if (status & RXSTATUS_PARITY_ERROR)
1513 icount->parity++;
1514 else if (status & RXSTATUS_FRAMING_ERROR)
1515 icount->frame++;
1516 else if (status & RXSTATUS_OVERRUN) {
1517 /* must issue purge fifo cmd before */
1518 /* 16C32 accepts more receive chars */
1519 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1520 icount->overrun++;
1521 }
1522
1523 /* discard char if tty control flags say so */
1524 if (status & info->ignore_status_mask)
1525 continue;
1526
1527 status &= info->read_status_mask;
1528
1529 if (status & RXSTATUS_BREAK_RECEIVED) {
1530 *tty->flip.flag_buf_ptr = TTY_BREAK;
1531 if (info->flags & ASYNC_SAK)
1532 do_SAK(tty);
1533 } else if (status & RXSTATUS_PARITY_ERROR)
1534 *tty->flip.flag_buf_ptr = TTY_PARITY;
1535 else if (status & RXSTATUS_FRAMING_ERROR)
1536 *tty->flip.flag_buf_ptr = TTY_FRAME;
1537 if (status & RXSTATUS_OVERRUN) {
1538 /* Overrun is special, since it's
1539 * reported immediately, and doesn't
1540 * affect the current character
1541 */
1542 if (tty->flip.count < TTY_FLIPBUF_SIZE) {
1543 tty->flip.count++;
1544 tty->flip.flag_buf_ptr++;
1545 tty->flip.char_buf_ptr++;
1546 *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1547 }
1548 }
1549 } /* end of if (error) */
1550
1551 tty->flip.flag_buf_ptr++;
1552 tty->flip.char_buf_ptr++;
1553 tty->flip.count++;
1554 }
1555
1556 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1557 printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
1558 __FILE__,__LINE__,tty->flip.count);
1559 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1560 __FILE__,__LINE__,icount->rx,icount->brk,
1561 icount->parity,icount->frame,icount->overrun);
1562 }
1563
1564 if ( tty->flip.count )
1565 tty_flip_buffer_push(tty);
1566}
1567
1568/* mgsl_isr_misc()
1569 *
1570 * Service a miscellaneous interrupt source.
1571 *
1572 * Arguments: info pointer to device extension (instance data)
1573 * Return Value: None
1574 */
1575static void mgsl_isr_misc( struct mgsl_struct *info )
1576{
1577 u16 status = usc_InReg( info, MISR );
1578
1579 if ( debug_level >= DEBUG_LEVEL_ISR )
1580 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1581 __FILE__,__LINE__,status);
1582
1583 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1584 (info->params.mode == MGSL_MODE_HDLC)) {
1585
1586 /* turn off receiver and rx DMA */
1587 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1588 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1589 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1590 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1591 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1592
1593 /* schedule BH handler to restart receiver */
1594 info->pending_bh |= BH_RECEIVE;
1595 info->rx_rcc_underrun = 1;
1596 }
1597
1598 usc_ClearIrqPendingBits( info, MISC );
1599 usc_UnlatchMiscstatusBits( info, status );
1600
1601} /* end of mgsl_isr_misc() */
1602
1603/* mgsl_isr_null()
1604 *
1605 * Services undefined interrupt vectors from the
1606 * USC. (hence this function SHOULD never be called)
1607 *
1608 * Arguments: info pointer to device extension (instance data)
1609 * Return Value: None
1610 */
1611static void mgsl_isr_null( struct mgsl_struct *info )
1612{
1613
1614} /* end of mgsl_isr_null() */
1615
1616/* mgsl_isr_receive_dma()
1617 *
1618 * Service a receive DMA channel interrupt.
1619 * For this driver there are two sources of receive DMA interrupts
1620 * as identified in the Receive DMA mode Register (RDMR):
1621 *
1622 * BIT3 EOA/EOL End of List, all receive buffers in receive
1623 * buffer list have been filled (no more free buffers
1624 * available). The DMA controller has shut down.
1625 *
1626 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1627 * DMA buffer is terminated in response to completion
1628 * of a good frame or a frame with errors. The status
1629 * of the frame is stored in the buffer entry in the
1630 * list of receive buffer entries.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
1635static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1636{
1637 u16 status;
1638
1639 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1640 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1641
1642 /* Read the receive DMA status to identify interrupt type. */
1643 /* This also clears the status bits. */
1644 status = usc_InDmaReg( info, RDMR );
1645
1646 if ( debug_level >= DEBUG_LEVEL_ISR )
1647 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1648 __FILE__,__LINE__,info->device_name,status);
1649
1650 info->pending_bh |= BH_RECEIVE;
1651
1652 if ( status & BIT3 ) {
1653 info->rx_overflow = 1;
1654 info->icount.buf_overrun++;
1655 }
1656
1657} /* end of mgsl_isr_receive_dma() */
1658
1659/* mgsl_isr_transmit_dma()
1660 *
1661 * This function services a transmit DMA channel interrupt.
1662 *
1663 * For this driver there is one source of transmit DMA interrupts
1664 * as identified in the Transmit DMA Mode Register (TDMR):
1665 *
1666 * BIT2 EOB End of Buffer. This interrupt occurs when a
1667 * transmit DMA buffer has been emptied.
1668 *
1669 * The driver maintains enough transmit DMA buffers to hold at least
1670 * one max frame size transmit frame. When operating in a buffered
1671 * transmit mode, there may be enough transmit DMA buffers to hold at
1672 * least two or more max frame size frames. On an EOB condition,
1673 * determine if there are any queued transmit buffers and copy into
1674 * transmit DMA buffers if we have room.
1675 *
1676 * Arguments: info pointer to device instance data
1677 * Return Value: None
1678 */
1679static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1680{
1681 u16 status;
1682
1683 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1684 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1685
1686 /* Read the transmit DMA status to identify interrupt type. */
1687 /* This also clears the status bits. */
1688
1689 status = usc_InDmaReg( info, TDMR );
1690
1691 if ( debug_level >= DEBUG_LEVEL_ISR )
1692 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1693 __FILE__,__LINE__,info->device_name,status);
1694
1695 if ( status & BIT2 ) {
1696 --info->tx_dma_buffers_used;
1697
1698 /* if there are transmit frames queued,
1699 * try to load the next one
1700 */
1701 if ( load_next_tx_holding_buffer(info) ) {
1702 /* if call returns non-zero value, we have
1703 * at least one free tx holding buffer
1704 */
1705 info->pending_bh |= BH_TRANSMIT;
1706 }
1707 }
1708
1709} /* end of mgsl_isr_transmit_dma() */
1710
1711/* mgsl_interrupt()
1712 *
1713 * Interrupt service routine entry point.
1714 *
1715 * Arguments:
1716 *
1717 * irq interrupt number that caused interrupt
1718 * dev_id device ID supplied during interrupt registration
1719 * regs interrupted processor context
1720 *
1721 * Return Value: IRQ_HANDLED if the interrupt was serviced, otherwise IRQ_NONE
1722 */
1723static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1724{
1725 struct mgsl_struct * info;
1726 u16 UscVector;
1727 u16 DmaVector;
1728
1729 if ( debug_level >= DEBUG_LEVEL_ISR )
1730 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1731 __FILE__,__LINE__,irq);
1732
1733 info = (struct mgsl_struct *)dev_id;
1734 if (!info)
1735 return IRQ_NONE;
1736
1737 spin_lock(&info->irq_spinlock);
1738
1739 for(;;) {
1740 /* Read the interrupt vectors from hardware. */
1741 UscVector = usc_InReg(info, IVR) >> 9;
1742 DmaVector = usc_InDmaReg(info, DIVR);
1743
1744 if ( debug_level >= DEBUG_LEVEL_ISR )
1745 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1746 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1747
1748 if ( !UscVector && !DmaVector )
1749 break;
1750
1751 /* Dispatch interrupt vector */
1752 if ( UscVector )
1753 (*UscIsrTable[UscVector])(info);
1754 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1755 mgsl_isr_transmit_dma(info);
1756 else
1757 mgsl_isr_receive_dma(info);
1758
1759 if ( info->isr_overflow ) {
1760 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1761 __FILE__,__LINE__,info->device_name, irq);
1762 usc_DisableMasterIrqBit(info);
1763 usc_DisableDmaInterrupts(info,DICR_MASTER);
1764 break;
1765 }
1766 }
1767
1768 /* Request bottom half processing if there's something
1769 * for it to do and the bh is not already running
1770 */
1771
1772 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1773 if ( debug_level >= DEBUG_LEVEL_ISR )
1774 printk("%s(%d):%s queueing bh task.\n",
1775 __FILE__,__LINE__,info->device_name);
1776 schedule_work(&info->task);
1777 info->bh_requested = 1;
1778 }
1779
1780 spin_unlock(&info->irq_spinlock);
1781
1782 if ( debug_level >= DEBUG_LEVEL_ISR )
1783 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1784 __FILE__,__LINE__,irq);
1785 return IRQ_HANDLED;
1786} /* end of mgsl_interrupt() */
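
/*
 * Illustrative sketch (not taken from this section): how a handler with
 * this 2.6-era signature is typically registered.  The exact flag and
 * name fields used here are assumptions; the driver's real request_irq()
 * call lives in its resource-claiming code.
 *
 *   if (request_irq(info->irq_level, mgsl_interrupt,
 *                   info->irq_flags, info->device_name, info) < 0)
 *           printk(KERN_ERR "%s: can't request IRQ %d\n",
 *                  info->device_name, info->irq_level);
 *
 * The dev_id argument (info) is handed back to mgsl_interrupt() above,
 * which is how the handler recovers its device instance from dev_id.
 */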
1787
1788/* startup()
1789 *
1790 * Initialize and start device.
1791 *
1792 * Arguments: info pointer to device instance data
1793 * Return Value: 0 if success, otherwise error code
1794 */
1795static int startup(struct mgsl_struct * info)
1796{
1797 int retval = 0;
1798
1799 if ( debug_level >= DEBUG_LEVEL_INFO )
1800 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1801
1802 if (info->flags & ASYNC_INITIALIZED)
1803 return 0;
1804
1805 if (!info->xmit_buf) {
1806 /* allocate a page of memory for a transmit buffer */
1807 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1808 if (!info->xmit_buf) {
1809 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1810 __FILE__,__LINE__,info->device_name);
1811 return -ENOMEM;
1812 }
1813 }
1814
1815 info->pending_bh = 0;
1816
1817	memset(&info->icount, 0, sizeof(info->icount));
1818
1819	init_timer(&info->tx_timer);
1820 info->tx_timer.data = (unsigned long)info;
1821 info->tx_timer.function = mgsl_tx_timeout;
1822
1823 /* Allocate and claim adapter resources */
1824 retval = mgsl_claim_resources(info);
1825
1826 /* perform existence check and diagnostics */
1827 if ( !retval )
1828 retval = mgsl_adapter_test(info);
1829
1830 if ( retval ) {
1831 if (capable(CAP_SYS_ADMIN) && info->tty)
1832 set_bit(TTY_IO_ERROR, &info->tty->flags);
1833 mgsl_release_resources(info);
1834 return retval;
1835 }
1836
1837 /* program hardware for current parameters */
1838 mgsl_change_params(info);
1839
1840 if (info->tty)
1841 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1842
1843 info->flags |= ASYNC_INITIALIZED;
1844
1845 return 0;
1846
1847} /* end of startup() */
1848
1849/* shutdown()
1850 *
1851 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1852 *
1853 * Arguments: info pointer to device instance data
1854 * Return Value: None
1855 */
1856static void shutdown(struct mgsl_struct * info)
1857{
1858 unsigned long flags;
1859
1860 if (!(info->flags & ASYNC_INITIALIZED))
1861 return;
1862
1863 if (debug_level >= DEBUG_LEVEL_INFO)
1864 printk("%s(%d):mgsl_shutdown(%s)\n",
1865 __FILE__,__LINE__, info->device_name );
1866
1867 /* clear status wait queue because status changes */
1868 /* can't happen after shutting down the hardware */
1869 wake_up_interruptible(&info->status_event_wait_q);
1870 wake_up_interruptible(&info->event_wait_q);
1871
1872 del_timer(&info->tx_timer);
1873
1874 if (info->xmit_buf) {
1875 free_page((unsigned long) info->xmit_buf);
1876 info->xmit_buf = NULL;
1877 }
1878
1879 spin_lock_irqsave(&info->irq_spinlock,flags);
1880 usc_DisableMasterIrqBit(info);
1881 usc_stop_receiver(info);
1882 usc_stop_transmitter(info);
1883 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1884 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1885 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1886
1887 /* Disable DMAEN (Port 7, Bit 14) */
1888 /* This disconnects the DMA request signal from the ISA bus */
1889 /* on the ISA adapter. This has no effect for the PCI adapter */
1890 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1891
1892 /* Disable INTEN (Port 6, Bit12) */
1893 /* This disconnects the IRQ request signal to the ISA bus */
1894 /* on the ISA adapter. This has no effect for the PCI adapter */
1895 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1896
1897 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1898 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1899 usc_set_serial_signals(info);
1900 }
1901
1902 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1903
1904 mgsl_release_resources(info);
1905
1906 if (info->tty)
1907 set_bit(TTY_IO_ERROR, &info->tty->flags);
1908
1909 info->flags &= ~ASYNC_INITIALIZED;
1910
1911} /* end of shutdown() */
1912
1913static void mgsl_program_hw(struct mgsl_struct *info)
1914{
1915 unsigned long flags;
1916
1917 spin_lock_irqsave(&info->irq_spinlock,flags);
1918
1919 usc_stop_receiver(info);
1920 usc_stop_transmitter(info);
1921 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1922
1923 if (info->params.mode == MGSL_MODE_HDLC ||
1924 info->params.mode == MGSL_MODE_RAW ||
1925 info->netcount)
1926 usc_set_sync_mode(info);
1927 else
1928 usc_set_async_mode(info);
1929
1930 usc_set_serial_signals(info);
1931
1932 info->dcd_chkcount = 0;
1933 info->cts_chkcount = 0;
1934 info->ri_chkcount = 0;
1935 info->dsr_chkcount = 0;
1936
1937 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1938 usc_EnableInterrupts(info, IO_PIN);
1939 usc_get_serial_signals(info);
1940
1941 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1942 usc_start_receiver(info);
1943
1944 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1945}
1946
1947/* Reconfigure adapter based on new parameters
1948 */
1949static void mgsl_change_params(struct mgsl_struct *info)
1950{
1951 unsigned cflag;
1952 int bits_per_char;
1953
1954 if (!info->tty || !info->tty->termios)
1955 return;
1956
1957 if (debug_level >= DEBUG_LEVEL_INFO)
1958 printk("%s(%d):mgsl_change_params(%s)\n",
1959 __FILE__,__LINE__, info->device_name );
1960
1961 cflag = info->tty->termios->c_cflag;
1962
1963 /* if B0 rate (hangup) specified then negate DTR and RTS */
1964 /* otherwise assert DTR and RTS */
1965 if (cflag & CBAUD)
1966 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1967 else
1968 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1969
1970 /* byte size and parity */
1971
1972 switch (cflag & CSIZE) {
1973 case CS5: info->params.data_bits = 5; break;
1974 case CS6: info->params.data_bits = 6; break;
1975 case CS7: info->params.data_bits = 7; break;
1976 case CS8: info->params.data_bits = 8; break;
1977 /* Never happens, but GCC is too dumb to figure it out */
1978 default: info->params.data_bits = 7; break;
1979 }
1980
1981 if (cflag & CSTOPB)
1982 info->params.stop_bits = 2;
1983 else
1984 info->params.stop_bits = 1;
1985
1986 info->params.parity = ASYNC_PARITY_NONE;
1987 if (cflag & PARENB) {
1988 if (cflag & PARODD)
1989 info->params.parity = ASYNC_PARITY_ODD;
1990 else
1991 info->params.parity = ASYNC_PARITY_EVEN;
1992#ifdef CMSPAR
1993 if (cflag & CMSPAR)
1994 info->params.parity = ASYNC_PARITY_SPACE;
1995#endif
1996 }
1997
1998 /* calculate number of jiffies to transmit a full
1999 * FIFO (32 bytes) at specified data rate
2000 */
2001 bits_per_char = info->params.data_bits +
2002 info->params.stop_bits + 1;
2003
2004 /* if port data rate is set to 460800 or less then
2005 * allow tty settings to override, otherwise keep the
2006 * current data rate.
2007 */
2008 if (info->params.data_rate <= 460800)
2009 info->params.data_rate = tty_get_baud_rate(info->tty);
2010
2011 if ( info->params.data_rate ) {
2012 info->timeout = (32*HZ*bits_per_char) /
2013 info->params.data_rate;
2014 }
2015 info->timeout += HZ/50; /* Add .02 seconds of slop */
2016
2017 if (cflag & CRTSCTS)
2018 info->flags |= ASYNC_CTS_FLOW;
2019 else
2020 info->flags &= ~ASYNC_CTS_FLOW;
2021
2022 if (cflag & CLOCAL)
2023 info->flags &= ~ASYNC_CHECK_CD;
2024 else
2025 info->flags |= ASYNC_CHECK_CD;
2026
2027 /* process tty input control flags */
2028
2029 info->read_status_mask = RXSTATUS_OVERRUN;
2030 if (I_INPCK(info->tty))
2031 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2032 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2033 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2034
2035 if (I_IGNPAR(info->tty))
2036 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2037 if (I_IGNBRK(info->tty)) {
2038 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2039 /* If ignoring parity and break indicators, ignore
2040 * overruns too. (For real raw support).
2041 */
2042 if (I_IGNPAR(info->tty))
2043 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2044 }
2045
2046 mgsl_program_hw(info);
2047
2048} /* end of mgsl_change_params() */
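
/*
 * Worked example of the timeout computed above (values assumed for
 * illustration, HZ = 100): with 8 data bits, 1 stop bit and the implicit
 * start bit, bits_per_char = 8 + 1 + 1 = 10.  At 9600 bps:
 *
 *   info->timeout = (32 * HZ * bits_per_char) / data_rate
 *                 = (32 * 100 * 10) / 9600 = 3 jiffies (integer division)
 *
 * plus HZ/50 = 2 jiffies of slop, i.e. roughly 50 ms for a full 32-byte
 * FIFO to drain before the transmit timeout fires.
 */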
2049
2050/* mgsl_put_char()
2051 *
2052 * Add a character to the transmit buffer.
2053 *
2054 * Arguments: tty pointer to tty information structure
2055 * ch character to add to transmit buffer
2056 *
2057 * Return Value: None
2058 */
2059static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2060{
2061 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2062 unsigned long flags;
2063
2064 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2065 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2066 __FILE__,__LINE__,ch,info->device_name);
2067 }
2068
2069 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2070 return;
2071
2072 if (!tty || !info->xmit_buf)
2073 return;
2074
2075 spin_lock_irqsave(&info->irq_spinlock,flags);
2076
2077 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2078
2079 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2080 info->xmit_buf[info->xmit_head++] = ch;
2081 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2082 info->xmit_cnt++;
2083 }
2084 }
2085
2086 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2087
2088} /* end of mgsl_put_char() */
2089
2090/* mgsl_flush_chars()
2091 *
2092 * Enable transmitter so remaining characters in the
2093 * transmit buffer are sent.
2094 *
2095 * Arguments: tty pointer to tty information structure
2096 * Return Value: None
2097 */
2098static void mgsl_flush_chars(struct tty_struct *tty)
2099{
2100 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2101 unsigned long flags;
2102
2103 if ( debug_level >= DEBUG_LEVEL_INFO )
2104 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2105 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2106
2107 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2108 return;
2109
2110 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2111 !info->xmit_buf)
2112 return;
2113
2114 if ( debug_level >= DEBUG_LEVEL_INFO )
2115 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2116 __FILE__,__LINE__,info->device_name );
2117
2118 spin_lock_irqsave(&info->irq_spinlock,flags);
2119
2120 if (!info->tx_active) {
2121 if ( (info->params.mode == MGSL_MODE_HDLC ||
2122 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2123 /* operating in synchronous (frame oriented) mode */
2124 /* copy data from circular xmit_buf to */
2125 /* transmit DMA buffer. */
2126 mgsl_load_tx_dma_buffer(info,
2127 info->xmit_buf,info->xmit_cnt);
2128 }
2129 usc_start_transmitter(info);
2130 }
2131
2132 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2133
2134} /* end of mgsl_flush_chars() */
2135
2136/* mgsl_write()
2137 *
2138 * Send a block of data
2139 *
2140 * Arguments:
2141 *
2142 * tty pointer to tty information structure
2143 * buf pointer to buffer containing send data
2144 * count size of send data in bytes
2145 *
2146 * Return Value: number of characters written
2147 */
2148static int mgsl_write(struct tty_struct * tty,
2149 const unsigned char *buf, int count)
2150{
2151 int c, ret = 0;
2152 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2153 unsigned long flags;
2154
2155 if ( debug_level >= DEBUG_LEVEL_INFO )
2156 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2157 __FILE__,__LINE__,info->device_name,count);
2158
2159 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2160 goto cleanup;
2161
2162 if (!tty || !info->xmit_buf || !tmp_buf)
2163 goto cleanup;
2164
2165 if ( info->params.mode == MGSL_MODE_HDLC ||
2166 info->params.mode == MGSL_MODE_RAW ) {
2167	 	/* operating in synchronous (frame oriented) mode */
2169 if (info->tx_active) {
2170
2171 if ( info->params.mode == MGSL_MODE_HDLC ) {
2172 ret = 0;
2173 goto cleanup;
2174 }
2175 /* transmitter is actively sending data -
2176 * if we have multiple transmit dma and
2177 * holding buffers, attempt to queue this
2178 * frame for transmission at a later time.
2179 */
2180 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2181 /* no tx holding buffers available */
2182 ret = 0;
2183 goto cleanup;
2184 }
2185
2186 /* queue transmit frame request */
2187 ret = count;
2188 save_tx_buffer_request(info,buf,count);
2189
2190 /* if we have sufficient tx dma buffers,
2191 * load the next buffered tx request
2192 */
2193 spin_lock_irqsave(&info->irq_spinlock,flags);
2194 load_next_tx_holding_buffer(info);
2195 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2196 goto cleanup;
2197 }
2198
2199 /* if operating in HDLC LoopMode and the adapter */
2200 /* has yet to be inserted into the loop, we can't */
2201 /* transmit */
2202
2203 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2204 !usc_loopmode_active(info) )
2205 {
2206 ret = 0;
2207 goto cleanup;
2208 }
2209
2210 if ( info->xmit_cnt ) {
2211		/* Send chars accumulated from mgsl_put_char() calls */
2212		/* as a frame and wait before accepting more data. */
2213 ret = 0;
2214
2215 /* copy data from circular xmit_buf to */
2216 /* transmit DMA buffer. */
2217 mgsl_load_tx_dma_buffer(info,
2218 info->xmit_buf,info->xmit_cnt);
2219 if ( debug_level >= DEBUG_LEVEL_INFO )
2220 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2221 __FILE__,__LINE__,info->device_name);
2222 } else {
2223 if ( debug_level >= DEBUG_LEVEL_INFO )
2224 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2225 __FILE__,__LINE__,info->device_name);
2226 ret = count;
2227 info->xmit_cnt = count;
2228 mgsl_load_tx_dma_buffer(info,buf,count);
2229 }
2230 } else {
2231 while (1) {
2232 spin_lock_irqsave(&info->irq_spinlock,flags);
2233 c = min_t(int, count,
2234 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2235 SERIAL_XMIT_SIZE - info->xmit_head));
2236 if (c <= 0) {
2237 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2238 break;
2239 }
2240 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2241 info->xmit_head = ((info->xmit_head + c) &
2242 (SERIAL_XMIT_SIZE-1));
2243 info->xmit_cnt += c;
2244 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2245 buf += c;
2246 count -= c;
2247 ret += c;
2248 }
2249 }
2250
2251 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2252 spin_lock_irqsave(&info->irq_spinlock,flags);
2253 if (!info->tx_active)
2254 usc_start_transmitter(info);
2255 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2256 }
2257cleanup:
2258 if ( debug_level >= DEBUG_LEVEL_INFO )
2259 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2260 __FILE__,__LINE__,info->device_name,ret);
2261
2262 return ret;
2263
2264} /* end of mgsl_write() */
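
/*
 * Sketch of the async-path ring buffer arithmetic used above (restated
 * for clarity; SERIAL_XMIT_SIZE is a power of two, so masking with
 * SERIAL_XMIT_SIZE-1 wraps the head index).  The chunk size c is limited
 * by both the free space left in the buffer and the contiguous space
 * before the physical end of the buffer:
 *
 *   c = min(count,
 *           min(SERIAL_XMIT_SIZE - xmit_cnt - 1,
 *               SERIAL_XMIT_SIZE - xmit_head));
 *   memcpy(xmit_buf + xmit_head, buf, c);
 *   xmit_head = (xmit_head + c) & (SERIAL_XMIT_SIZE - 1);
 *   xmit_cnt += c;
 *
 * Looping until c <= 0 lets a single mgsl_write() wrap around the end of
 * the buffer in two memcpy() passes.
 */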
2265
2266/* mgsl_write_room()
2267 *
2268 * Return the count of free bytes in transmit buffer
2269 *
2270 * Arguments: tty pointer to tty info structure
2271 * Return Value:	count of free bytes in transmit buffer
2272 */
2273static int mgsl_write_room(struct tty_struct *tty)
2274{
2275 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2276 int ret;
2277
2278 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2279 return 0;
2280 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2281 if (ret < 0)
2282 ret = 0;
2283
2284 if (debug_level >= DEBUG_LEVEL_INFO)
2285 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2286 __FILE__,__LINE__, info->device_name,ret );
2287
2288 if ( info->params.mode == MGSL_MODE_HDLC ||
2289 info->params.mode == MGSL_MODE_RAW ) {
2290 /* operating in synchronous (frame oriented) mode */
2291 if ( info->tx_active )
2292 return 0;
2293 else
2294 return HDLC_MAX_FRAME_SIZE;
2295 }
2296
2297 return ret;
2298
2299} /* end of mgsl_write_room() */
2300
2301/* mgsl_chars_in_buffer()
2302 *
2303 * Return the count of bytes in transmit buffer
2304 *
2305 * Arguments: tty pointer to tty info structure
2306 * Return Value:	count of bytes in transmit buffer
2307 */
2308static int mgsl_chars_in_buffer(struct tty_struct *tty)
2309{
2310 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2311
2312 if (debug_level >= DEBUG_LEVEL_INFO)
2313 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2314 __FILE__,__LINE__, info->device_name );
2315
2316 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2317 return 0;
2318
2319 if (debug_level >= DEBUG_LEVEL_INFO)
2320 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2321 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2322
2323 if ( info->params.mode == MGSL_MODE_HDLC ||
2324 info->params.mode == MGSL_MODE_RAW ) {
2325 /* operating in synchronous (frame oriented) mode */
2326 if ( info->tx_active )
2327 return info->max_frame_size;
2328 else
2329 return 0;
2330 }
2331
2332 return info->xmit_cnt;
2333} /* end of mgsl_chars_in_buffer() */
2334
2335/* mgsl_flush_buffer()
2336 *
2337 * Discard all data in the send buffer
2338 *
2339 * Arguments: tty pointer to tty info structure
2340 * Return Value: None
2341 */
2342static void mgsl_flush_buffer(struct tty_struct *tty)
2343{
2344 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2345 unsigned long flags;
2346
2347 if (debug_level >= DEBUG_LEVEL_INFO)
2348 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2349 __FILE__,__LINE__, info->device_name );
2350
2351 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2352 return;
2353
2354 spin_lock_irqsave(&info->irq_spinlock,flags);
2355 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2356 del_timer(&info->tx_timer);
2357 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2358
2359 wake_up_interruptible(&tty->write_wait);
2360 tty_wakeup(tty);
2361}
2362
2363/* mgsl_send_xchar()
2364 *
2365 * Send a high-priority XON/XOFF character
2366 *
2367 * Arguments: tty pointer to tty info structure
2368 * ch character to send
2369 * Return Value: None
2370 */
2371static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2372{
2373 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2374 unsigned long flags;
2375
2376 if (debug_level >= DEBUG_LEVEL_INFO)
2377 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2378 __FILE__,__LINE__, info->device_name, ch );
2379
2380 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2381 return;
2382
2383 info->x_char = ch;
2384 if (ch) {
2385 /* Make sure transmit interrupts are on */
2386 spin_lock_irqsave(&info->irq_spinlock,flags);
2387 if (!info->tx_enabled)
2388 usc_start_transmitter(info);
2389 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2390 }
2391} /* end of mgsl_send_xchar() */
2392
2393/* mgsl_throttle()
2394 *
2395 * Signal remote device to throttle send data (our receive data)
2396 *
2397 * Arguments: tty pointer to tty info structure
2398 * Return Value: None
2399 */
2400static void mgsl_throttle(struct tty_struct * tty)
2401{
2402 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2403 unsigned long flags;
2404
2405 if (debug_level >= DEBUG_LEVEL_INFO)
2406 printk("%s(%d):mgsl_throttle(%s) entry\n",
2407 __FILE__,__LINE__, info->device_name );
2408
2409 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2410 return;
2411
2412 if (I_IXOFF(tty))
2413 mgsl_send_xchar(tty, STOP_CHAR(tty));
2414
2415 if (tty->termios->c_cflag & CRTSCTS) {
2416 spin_lock_irqsave(&info->irq_spinlock,flags);
2417 info->serial_signals &= ~SerialSignal_RTS;
2418 usc_set_serial_signals(info);
2419 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2420 }
2421} /* end of mgsl_throttle() */
2422
2423/* mgsl_unthrottle()
2424 *
2425 * Signal remote device to stop throttling send data (our receive data)
2426 *
2427 * Arguments: tty pointer to tty info structure
2428 * Return Value: None
2429 */
2430static void mgsl_unthrottle(struct tty_struct * tty)
2431{
2432 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2433 unsigned long flags;
2434
2435 if (debug_level >= DEBUG_LEVEL_INFO)
2436 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2437 __FILE__,__LINE__, info->device_name );
2438
2439 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2440 return;
2441
2442 if (I_IXOFF(tty)) {
2443 if (info->x_char)
2444 info->x_char = 0;
2445 else
2446 mgsl_send_xchar(tty, START_CHAR(tty));
2447 }
2448
2449 if (tty->termios->c_cflag & CRTSCTS) {
2450 spin_lock_irqsave(&info->irq_spinlock,flags);
2451 info->serial_signals |= SerialSignal_RTS;
2452 usc_set_serial_signals(info);
2453 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2454 }
2455
2456} /* end of mgsl_unthrottle() */
2457
2458/* mgsl_get_stats()
2459 *
2460 * get the current serial parameters information
2461 *
2462 * Arguments: info pointer to device instance data
2463 * user_icount pointer to buffer to hold returned stats
2464 *
2465 * Return Value: 0 if success, otherwise error code
2466 */
2467static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2468{
2469 int err;
2470
2471 if (debug_level >= DEBUG_LEVEL_INFO)
2472		printk("%s(%d):mgsl_get_stats(%s)\n",
2473 __FILE__,__LINE__, info->device_name);
2474
2475	if (!user_icount) {
2476 memset(&info->icount, 0, sizeof(info->icount));
2477 } else {
2478 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2479 if (err)
2480 return -EFAULT;
2481	}
2482
2483 return 0;
2484
2485} /* end of mgsl_get_stats() */
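
/*
 * Userspace sketch (not driver code) of reading these statistics through
 * the MGSL_IOCGSTATS ioctl dispatched below in mgsl_ioctl_common().  The
 * device node name is an assumption for illustration.
 *
 *   #include <stdio.h>
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/synclink.h>
 *
 *   struct mgsl_icount stats;
 *   int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY);
 *   if (fd >= 0 && ioctl(fd, MGSL_IOCGSTATS, &stats) == 0)
 *           printf("txok=%d rxok=%d\n", stats.txok, stats.rxok);
 *
 * Passing a NULL pointer instead of &stats asks the driver to clear the
 * counters, per the !user_icount branch above.
 */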
2486
2487/* mgsl_get_params()
2488 *
2489 * get the current serial parameters information
2490 *
2491 * Arguments: info pointer to device instance data
2492 * user_params pointer to buffer to hold returned params
2493 *
2494 * Return Value: 0 if success, otherwise error code
2495 */
2496static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2497{
2498 int err;
2499 if (debug_level >= DEBUG_LEVEL_INFO)
2500 printk("%s(%d):mgsl_get_params(%s)\n",
2501 __FILE__,__LINE__, info->device_name);
2502
2503 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2504 if (err) {
2505 if ( debug_level >= DEBUG_LEVEL_INFO )
2506 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2507 __FILE__,__LINE__,info->device_name);
2508 return -EFAULT;
2509 }
2510
2511 return 0;
2512
2513} /* end of mgsl_get_params() */
2514
2515/* mgsl_set_params()
2516 *
2517 * set the serial parameters
2518 *
2519 * Arguments:
2520 *
2521 * info pointer to device instance data
2522 * new_params user buffer containing new serial params
2523 *
2524 * Return Value: 0 if success, otherwise error code
2525 */
2526static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2527{
2528 unsigned long flags;
2529 MGSL_PARAMS tmp_params;
2530 int err;
2531
2532 if (debug_level >= DEBUG_LEVEL_INFO)
2533 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2534 info->device_name );
2535 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2536 if (err) {
2537 if ( debug_level >= DEBUG_LEVEL_INFO )
2538 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2539 __FILE__,__LINE__,info->device_name);
2540 return -EFAULT;
2541 }
2542
2543 spin_lock_irqsave(&info->irq_spinlock,flags);
2544 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2545 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2546
2547 mgsl_change_params(info);
2548
2549 return 0;
2550
2551} /* end of mgsl_set_params() */
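
/*
 * Userspace sketch (illustrative, not part of the driver) of the
 * MGSL_IOCGPARAMS / MGSL_IOCSPARAMS pair handled above: read the current
 * MGSL_PARAMS block, adjust a field, and write it back, which lands in
 * mgsl_set_params() and re-programs the hardware via mgsl_change_params().
 * The chosen values are assumptions.
 *
 *   MGSL_PARAMS params;
 *   if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *           params.mode      = MGSL_MODE_HDLC;
 *           params.data_rate = 64000;
 *           if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *                   perror("MGSL_IOCSPARAMS");
 *   }
 */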
2552
2553/* mgsl_get_txidle()
2554 *
2555 * get the current transmit idle mode
2556 *
2557 * Arguments: info pointer to device instance data
2558 * idle_mode pointer to buffer to hold returned idle mode
2559 *
2560 * Return Value: 0 if success, otherwise error code
2561 */
2562static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2563{
2564 int err;
2565
2566 if (debug_level >= DEBUG_LEVEL_INFO)
2567 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2568 __FILE__,__LINE__, info->device_name, info->idle_mode);
2569
2570 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2571 if (err) {
2572 if ( debug_level >= DEBUG_LEVEL_INFO )
2573 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2574 __FILE__,__LINE__,info->device_name);
2575 return -EFAULT;
2576 }
2577
2578 return 0;
2579
2580} /* end of mgsl_get_txidle() */
2581
2582/* mgsl_set_txidle() service ioctl to set transmit idle mode
2583 *
2584 * Arguments: info pointer to device instance data
2585 * idle_mode new idle mode
2586 *
2587 * Return Value: 0 if success, otherwise error code
2588 */
2589static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2590{
2591 unsigned long flags;
2592
2593 if (debug_level >= DEBUG_LEVEL_INFO)
2594 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2595 info->device_name, idle_mode );
2596
2597 spin_lock_irqsave(&info->irq_spinlock,flags);
2598 info->idle_mode = idle_mode;
2599 usc_set_txidle( info );
2600 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2601 return 0;
2602
2603} /* end of mgsl_set_txidle() */
2604
2605/* mgsl_txenable()
2606 *
2607 * enable or disable the transmitter
2608 *
2609 * Arguments:
2610 *
2611 * info pointer to device instance data
2612 * enable 1 = enable, 0 = disable
2613 *
2614 * Return Value: 0 if success, otherwise error code
2615 */
2616static int mgsl_txenable(struct mgsl_struct * info, int enable)
2617{
2618 unsigned long flags;
2619
2620 if (debug_level >= DEBUG_LEVEL_INFO)
2621 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2622 info->device_name, enable);
2623
2624 spin_lock_irqsave(&info->irq_spinlock,flags);
2625 if ( enable ) {
2626 if ( !info->tx_enabled ) {
2627
2628 usc_start_transmitter(info);
2629 /*--------------------------------------------------
2630 * if HDLC/SDLC Loop mode, attempt to insert the
2631 * station in the 'loop' by setting CMR:13. Upon
2632 * receipt of the next GoAhead (RxAbort) sequence,
2633 * the OnLoop indicator (CCSR:7) should go active
2634 * to indicate that we are on the loop
2635 *--------------------------------------------------*/
2636 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2637 usc_loopmode_insert_request( info );
2638 }
2639 } else {
2640 if ( info->tx_enabled )
2641 usc_stop_transmitter(info);
2642 }
2643 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2644 return 0;
2645
2646} /* end of mgsl_txenable() */
2647
2648/* mgsl_txabort() abort send HDLC frame
2649 *
2650 * Arguments: info pointer to device instance data
2651 * Return Value: 0 if success, otherwise error code
2652 */
2653static int mgsl_txabort(struct mgsl_struct * info)
2654{
2655 unsigned long flags;
2656
2657 if (debug_level >= DEBUG_LEVEL_INFO)
2658 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2659 info->device_name);
2660
2661 spin_lock_irqsave(&info->irq_spinlock,flags);
2662 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2663 {
2664 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2665 usc_loopmode_cancel_transmit( info );
2666 else
2667 usc_TCmd(info,TCmd_SendAbort);
2668 }
2669 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2670 return 0;
2671
2672} /* end of mgsl_txabort() */
2673
2674/* mgsl_rxenable() enable or disable the receiver
2675 *
2676 * Arguments: info pointer to device instance data
2677 * enable 1 = enable, 0 = disable
2678 * Return Value: 0 if success, otherwise error code
2679 */
2680static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2681{
2682 unsigned long flags;
2683
2684 if (debug_level >= DEBUG_LEVEL_INFO)
2685 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2686 info->device_name, enable);
2687
2688 spin_lock_irqsave(&info->irq_spinlock,flags);
2689 if ( enable ) {
2690 if ( !info->rx_enabled )
2691 usc_start_receiver(info);
2692 } else {
2693 if ( info->rx_enabled )
2694 usc_stop_receiver(info);
2695 }
2696 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2697 return 0;
2698
2699} /* end of mgsl_rxenable() */
2700
2701/* mgsl_wait_event() wait for specified event to occur
2702 *
2703 * Arguments: info pointer to device instance data
2704 * mask pointer to bitmask of events to wait for
2705 * Return Value: 	0 if successful and bit mask updated with
2706 *			the events that triggered,
2707 * 			otherwise error code
2708 */
2709static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2710{
2711 unsigned long flags;
2712 int s;
2713 int rc=0;
2714 struct mgsl_icount cprev, cnow;
2715 int events;
2716 int mask;
2717 struct _input_signal_events oldsigs, newsigs;
2718 DECLARE_WAITQUEUE(wait, current);
2719
2720 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2721 if (rc) {
2722 return -EFAULT;
2723 }
2724
2725 if (debug_level >= DEBUG_LEVEL_INFO)
2726 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2727 info->device_name, mask);
2728
2729 spin_lock_irqsave(&info->irq_spinlock,flags);
2730
2731 /* return immediately if state matches requested events */
2732 usc_get_serial_signals(info);
2733 s = info->serial_signals;
2734 events = mask &
2735 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2736 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2737 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2738 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2739 if (events) {
2740 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2741 goto exit;
2742 }
2743
2744 /* save current irq counts */
2745 cprev = info->icount;
2746 oldsigs = info->input_signal_events;
2747
2748 /* enable hunt and idle irqs if needed */
2749 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2750 u16 oldreg = usc_InReg(info,RICR);
2751 u16 newreg = oldreg +
2752 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2753 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2754 if (oldreg != newreg)
2755 usc_OutReg(info, RICR, newreg);
2756 }
2757
2758 set_current_state(TASK_INTERRUPTIBLE);
2759 add_wait_queue(&info->event_wait_q, &wait);
2760
2761 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2762
2763
2764 for(;;) {
2765 schedule();
2766 if (signal_pending(current)) {
2767 rc = -ERESTARTSYS;
2768 break;
2769 }
2770
2771 /* get current irq counts */
2772 spin_lock_irqsave(&info->irq_spinlock,flags);
2773 cnow = info->icount;
2774 newsigs = info->input_signal_events;
2775 set_current_state(TASK_INTERRUPTIBLE);
2776 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2777
2778 /* if no change, wait aborted for some reason */
2779 if (newsigs.dsr_up == oldsigs.dsr_up &&
2780 newsigs.dsr_down == oldsigs.dsr_down &&
2781 newsigs.dcd_up == oldsigs.dcd_up &&
2782 newsigs.dcd_down == oldsigs.dcd_down &&
2783 newsigs.cts_up == oldsigs.cts_up &&
2784 newsigs.cts_down == oldsigs.cts_down &&
2785 newsigs.ri_up == oldsigs.ri_up &&
2786 newsigs.ri_down == oldsigs.ri_down &&
2787 cnow.exithunt == cprev.exithunt &&
2788 cnow.rxidle == cprev.rxidle) {
2789 rc = -EIO;
2790 break;
2791 }
2792
2793 events = mask &
2794 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2795 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2796 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2797 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2798 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2799 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2800 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2801 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2802 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2803 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2804 if (events)
2805 break;
2806
2807 cprev = cnow;
2808 oldsigs = newsigs;
2809 }
2810
2811 remove_wait_queue(&info->event_wait_q, &wait);
2812 set_current_state(TASK_RUNNING);
2813
2814 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2815 spin_lock_irqsave(&info->irq_spinlock,flags);
2816 if (!waitqueue_active(&info->event_wait_q)) {
2817			/* disable exit hunt mode/idle rcvd IRQs */
2818 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2819 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2820 }
2821 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2822 }
2823exit:
2824 if ( rc == 0 )
2825 PUT_USER(rc, events, mask_ptr);
2826
2827 return rc;
2828
2829} /* end of mgsl_wait_event() */
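
/*
 * Userspace sketch (illustrative) of the MGSL_IOCWAITEVENT ioctl serviced
 * above: the caller passes a bitmask of interesting events and, on return,
 * the same int holds the events that actually occurred.  The event names
 * come from <linux/synclink.h>; the usage shown is an assumption.
 *
 *   int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *   if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *           printf("DCD went %s\n",
 *                  (events & MgslEvent_DcdActive) ? "active" : "inactive");
 */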
2830
2831static int modem_input_wait(struct mgsl_struct *info,int arg)
2832{
2833 unsigned long flags;
2834 int rc;
2835 struct mgsl_icount cprev, cnow;
2836 DECLARE_WAITQUEUE(wait, current);
2837
2838 /* save current irq counts */
2839 spin_lock_irqsave(&info->irq_spinlock,flags);
2840 cprev = info->icount;
2841 add_wait_queue(&info->status_event_wait_q, &wait);
2842 set_current_state(TASK_INTERRUPTIBLE);
2843 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2844
2845 for(;;) {
2846 schedule();
2847 if (signal_pending(current)) {
2848 rc = -ERESTARTSYS;
2849 break;
2850 }
2851
2852 /* get new irq counts */
2853 spin_lock_irqsave(&info->irq_spinlock,flags);
2854 cnow = info->icount;
2855 set_current_state(TASK_INTERRUPTIBLE);
2856 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2857
2858 /* if no change, wait aborted for some reason */
2859 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2860 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2861 rc = -EIO;
2862 break;
2863 }
2864
2865 /* check for change in caller specified modem input */
2866 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2867 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2868 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2869 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2870 rc = 0;
2871 break;
2872 }
2873
2874 cprev = cnow;
2875 }
2876 remove_wait_queue(&info->status_event_wait_q, &wait);
2877 set_current_state(TASK_RUNNING);
2878 return rc;
2879}
2880
2881/* return the state of the serial control and status signals
2882 */
2883static int tiocmget(struct tty_struct *tty, struct file *file)
2884{
2885 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2886 unsigned int result;
2887 unsigned long flags;
2888
2889 spin_lock_irqsave(&info->irq_spinlock,flags);
2890 usc_get_serial_signals(info);
2891 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2892
2893 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2894 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2895 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2896 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2897 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2898 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2899
2900 if (debug_level >= DEBUG_LEVEL_INFO)
2901 printk("%s(%d):%s tiocmget() value=%08X\n",
2902 __FILE__,__LINE__, info->device_name, result );
2903 return result;
2904}
2905
2906/* set modem control signals (DTR/RTS)
2907 */
2908static int tiocmset(struct tty_struct *tty, struct file *file,
2909 unsigned int set, unsigned int clear)
2910{
2911 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2912 unsigned long flags;
2913
2914 if (debug_level >= DEBUG_LEVEL_INFO)
2915 printk("%s(%d):%s tiocmset(%x,%x)\n",
2916 __FILE__,__LINE__,info->device_name, set, clear);
2917
2918 if (set & TIOCM_RTS)
2919 info->serial_signals |= SerialSignal_RTS;
2920 if (set & TIOCM_DTR)
2921 info->serial_signals |= SerialSignal_DTR;
2922 if (clear & TIOCM_RTS)
2923 info->serial_signals &= ~SerialSignal_RTS;
2924 if (clear & TIOCM_DTR)
2925 info->serial_signals &= ~SerialSignal_DTR;
2926
2927 spin_lock_irqsave(&info->irq_spinlock,flags);
2928 usc_set_serial_signals(info);
2929 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2930
2931 return 0;
2932}
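
/*
 * These two entry points back the standard termios modem-control ioctls.
 * A minimal userspace sketch (assumed usage, not driver code):
 *
 *   int mstat;
 *   ioctl(fd, TIOCMGET, &mstat);
 *   mstat |= TIOCM_DTR | TIOCM_RTS;
 *   ioctl(fd, TIOCMSET, &mstat);
 *
 * TIOCMBIS/TIOCMBIC may also be used; the tty core translates all of these
 * into the set/clear masks passed to tiocmset() above.
 */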
2933
2934/* mgsl_break() Set or clear transmit break condition
2935 *
2936 * Arguments: tty pointer to tty instance data
2937 * break_state -1=set break condition, 0=clear
2938 * Return Value: None
2939 */
2940static void mgsl_break(struct tty_struct *tty, int break_state)
2941{
2942 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2943 unsigned long flags;
2944
2945 if (debug_level >= DEBUG_LEVEL_INFO)
2946 printk("%s(%d):mgsl_break(%s,%d)\n",
2947 __FILE__,__LINE__, info->device_name, break_state);
2948
2949 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2950 return;
2951
2952 spin_lock_irqsave(&info->irq_spinlock,flags);
2953 if (break_state == -1)
2954 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2955 else
2956 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2957 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2958
2959} /* end of mgsl_break() */
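
/*
 * mgsl_break() is reached from the tty layer's break handling, for example
 * a userspace tcsendbreak() call (sketch, assumed usage):
 *
 *   #include <termios.h>
 *   tcsendbreak(fd, 0);
 *
 * Setting BIT7 in IOCR, as above, is how this hardware holds TxD in the
 * break (spacing) condition until the bit is cleared again.
 */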
2960
2961/* mgsl_ioctl() Service an IOCTL request
2962 *
2963 * Arguments:
2964 *
2965 * tty pointer to tty instance data
2966 * file pointer to associated file object for device
2967 * cmd IOCTL command code
2968 * arg command argument/context
2969 *
2970 * Return Value: 0 if success, otherwise error code
2971 */
2972static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2973 unsigned int cmd, unsigned long arg)
2974{
2975 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2976
2977 if (debug_level >= DEBUG_LEVEL_INFO)
2978 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2979 info->device_name, cmd );
2980
2981 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2982 return -ENODEV;
2983
2984 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2985 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2986 if (tty->flags & (1 << TTY_IO_ERROR))
2987 return -EIO;
2988 }
2989
2990 return mgsl_ioctl_common(info, cmd, arg);
2991}
2992
2993static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2994{
2995 int error;
2996 struct mgsl_icount cnow; /* kernel counter temps */
2997 void __user *argp = (void __user *)arg;
2998 struct serial_icounter_struct __user *p_cuser; /* user space */
2999 unsigned long flags;
3000
3001 switch (cmd) {
3002 case MGSL_IOCGPARAMS:
3003 return mgsl_get_params(info, argp);
3004 case MGSL_IOCSPARAMS:
3005 return mgsl_set_params(info, argp);
3006 case MGSL_IOCGTXIDLE:
3007 return mgsl_get_txidle(info, argp);
3008 case MGSL_IOCSTXIDLE:
3009 return mgsl_set_txidle(info,(int)arg);
3010 case MGSL_IOCTXENABLE:
3011 return mgsl_txenable(info,(int)arg);
3012 case MGSL_IOCRXENABLE:
3013 return mgsl_rxenable(info,(int)arg);
3014 case MGSL_IOCTXABORT:
3015 return mgsl_txabort(info);
3016 case MGSL_IOCGSTATS:
3017 return mgsl_get_stats(info, argp);
3018 case MGSL_IOCWAITEVENT:
3019 return mgsl_wait_event(info, argp);
3020 case MGSL_IOCLOOPTXDONE:
3021 return mgsl_loopmode_send_done(info);
3022 /* Wait for modem input (DCD,RI,DSR,CTS) change
3023 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3024 */
3025 case TIOCMIWAIT:
3026 return modem_input_wait(info,(int)arg);
3027
3028 /*
3029 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3030 * Return: write counters to the user passed counter struct
3031 * NB: both 1->0 and 0->1 transitions are counted except for
3032 * RI where only 0->1 is counted.
3033 */
3034 case TIOCGICOUNT:
3035 spin_lock_irqsave(&info->irq_spinlock,flags);
3036 cnow = info->icount;
3037 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3038 p_cuser = argp;
3039 PUT_USER(error,cnow.cts, &p_cuser->cts);
3040 if (error) return error;
3041 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3042 if (error) return error;
3043 PUT_USER(error,cnow.rng, &p_cuser->rng);
3044 if (error) return error;
3045 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3046 if (error) return error;
3047 PUT_USER(error,cnow.rx, &p_cuser->rx);
3048 if (error) return error;
3049 PUT_USER(error,cnow.tx, &p_cuser->tx);
3050 if (error) return error;
3051 PUT_USER(error,cnow.frame, &p_cuser->frame);
3052 if (error) return error;
3053 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3054 if (error) return error;
3055 PUT_USER(error,cnow.parity, &p_cuser->parity);
3056 if (error) return error;
3057 PUT_USER(error,cnow.brk, &p_cuser->brk);
3058 if (error) return error;
3059 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3060 if (error) return error;
3061 return 0;
3062 default:
3063 return -ENOIOCTLCMD;
3064 }
3065 return 0;
3066}
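
/*
 * Userspace sketch (illustrative) combining the two termios-style cases
 * handled above: block until DCD changes with TIOCMIWAIT, then read the
 * interrupt counters with TIOCGICOUNT.  The structure and ioctl names are
 * the standard serial ones from <linux/serial.h>.
 *
 *   struct serial_icounter_struct ic;
 *   if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
 *       ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *           printf("dcd transitions=%d\n", ic.dcd);
 */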
3067
3068/* mgsl_set_termios()
3069 *
3070 * Set new termios settings
3071 *
3072 * Arguments:
3073 *
3074 * tty pointer to tty structure
3075 * 	old_termios	pointer to previous termios settings
3076 *
3077 * Return Value: None
3078 */
3079static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3080{
3081 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3082 unsigned long flags;
3083
3084 if (debug_level >= DEBUG_LEVEL_INFO)
3085 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3086 tty->driver->name );
3087
3088 /* just return if nothing has changed */
3089 if ((tty->termios->c_cflag == old_termios->c_cflag)
3090 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3091 == RELEVANT_IFLAG(old_termios->c_iflag)))
3092 return;
3093
3094 mgsl_change_params(info);
3095
3096 /* Handle transition to B0 status */
3097 if (old_termios->c_cflag & CBAUD &&
3098 !(tty->termios->c_cflag & CBAUD)) {
3099 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3100 spin_lock_irqsave(&info->irq_spinlock,flags);
3101 usc_set_serial_signals(info);
3102 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3103 }
3104
3105 /* Handle transition away from B0 status */
3106 if (!(old_termios->c_cflag & CBAUD) &&
3107 tty->termios->c_cflag & CBAUD) {
3108 info->serial_signals |= SerialSignal_DTR;
3109 if (!(tty->termios->c_cflag & CRTSCTS) ||
3110 !test_bit(TTY_THROTTLED, &tty->flags)) {
3111 info->serial_signals |= SerialSignal_RTS;
3112 }
3113 spin_lock_irqsave(&info->irq_spinlock,flags);
3114 usc_set_serial_signals(info);
3115 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3116 }
3117
3118 /* Handle turning off CRTSCTS */
3119 if (old_termios->c_cflag & CRTSCTS &&
3120 !(tty->termios->c_cflag & CRTSCTS)) {
3121 tty->hw_stopped = 0;
3122 mgsl_start(tty);
3123 }
3124
3125} /* end of mgsl_set_termios() */
3126
3127/* mgsl_close()
3128 *
3129 * Called when port is closed. Wait for remaining data to be
3130 * sent. Disable port and free resources.
3131 *
3132 * Arguments:
3133 *
3134 * tty pointer to open tty structure
3135 * filp pointer to open file object
3136 *
3137 * Return Value: None
3138 */
3139static void mgsl_close(struct tty_struct *tty, struct file * filp)
3140{
3141 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3142
3143 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3144 return;
3145
3146 if (debug_level >= DEBUG_LEVEL_INFO)
3147 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3148 __FILE__,__LINE__, info->device_name, info->count);
3149
3150 if (!info->count)
3151 return;
3152
3153 if (tty_hung_up_p(filp))
3154 goto cleanup;
3155
3156 if ((tty->count == 1) && (info->count != 1)) {
3157 /*
3158 * tty->count is 1 and the tty structure will be freed.
3159 * info->count should be one in this case.
3160 * if it's not, correct it so that the port is shutdown.
3161 */
3162 printk("mgsl_close: bad refcount; tty->count is 1, "
3163 "info->count is %d\n", info->count);
3164 info->count = 1;
3165 }
3166
3167 info->count--;
3168
3169 /* if at least one open remaining, leave hardware active */
3170 if (info->count)
3171 goto cleanup;
3172
3173 info->flags |= ASYNC_CLOSING;
3174
3175 /* set tty->closing to notify line discipline to
3176 * only process XON/XOFF characters. Only the N_TTY
3177 * discipline appears to use this (ppp does not).
3178 */
3179 tty->closing = 1;
3180
3181 /* wait for transmit data to clear all layers */
3182
3183 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3184 if (debug_level >= DEBUG_LEVEL_INFO)
3185 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3186 __FILE__,__LINE__, info->device_name );
3187 tty_wait_until_sent(tty, info->closing_wait);
3188 }
3189
3190 if (info->flags & ASYNC_INITIALIZED)
3191 mgsl_wait_until_sent(tty, info->timeout);
3192
3193 if (tty->driver->flush_buffer)
3194 tty->driver->flush_buffer(tty);
3195
3196 tty_ldisc_flush(tty);
3197
3198 shutdown(info);
3199
3200 tty->closing = 0;
3201 info->tty = NULL;
3202
3203 if (info->blocked_open) {
3204 if (info->close_delay) {
3205 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3206 }
3207 wake_up_interruptible(&info->open_wait);
3208 }
3209
3210 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3211
3212 wake_up_interruptible(&info->close_wait);
3213
3214cleanup:
3215 if (debug_level >= DEBUG_LEVEL_INFO)
3216 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3217 tty->driver->name, info->count);
3218
3219} /* end of mgsl_close() */
3220
3221/* mgsl_wait_until_sent()
3222 *
3223 * Wait until the transmitter is empty.
3224 *
3225 * Arguments:
3226 *
3227 * tty pointer to tty info structure
3228 * timeout time to wait for send completion
3229 *
3230 * Return Value: None
3231 */
3232static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3233{
3234 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3235 unsigned long orig_jiffies, char_time;
3236
3237 if (!info )
3238 return;
3239
3240 if (debug_level >= DEBUG_LEVEL_INFO)
3241 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3242 __FILE__,__LINE__, info->device_name );
3243
3244 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3245 return;
3246
3247 if (!(info->flags & ASYNC_INITIALIZED))
3248 goto exit;
3249
3250 orig_jiffies = jiffies;
3251
3252 /* Set check interval to 1/5 of estimated time to
3253 * send a character, and make it at least 1. The check
3254 * interval should also be less than the timeout.
3255 * Note: use tight timings here to satisfy the NIST-PCTS.
3256 */
3257
3258 if ( info->params.data_rate ) {
3259 char_time = info->timeout/(32 * 5);
3260 if (!char_time)
3261 char_time++;
3262 } else
3263 char_time = 1;
3264
3265 if (timeout)
3266 char_time = min_t(unsigned long, char_time, timeout);
3267
3268 if ( info->params.mode == MGSL_MODE_HDLC ||
3269 info->params.mode == MGSL_MODE_RAW ) {
3270 while (info->tx_active) {
3271 msleep_interruptible(jiffies_to_msecs(char_time));
3272 if (signal_pending(current))
3273 break;
3274 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3275 break;
3276 }
3277 } else {
3278 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3279 info->tx_enabled) {
3280 msleep_interruptible(jiffies_to_msecs(char_time));
3281 if (signal_pending(current))
3282 break;
3283 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3284 break;
3285 }
3286 }
3287
3288exit:
3289 if (debug_level >= DEBUG_LEVEL_INFO)
3290 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3291 __FILE__,__LINE__, info->device_name );
3292
3293} /* end of mgsl_wait_until_sent() */
3294
3295/* mgsl_hangup()
3296 *
3297 * Called by tty_hangup() when a hangup is signaled.
3298 * This is the same as closing all open files for the port.
3299 *
3300 * Arguments: tty pointer to associated tty object
3301 * Return Value: None
3302 */
3303static void mgsl_hangup(struct tty_struct *tty)
3304{
3305 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3306
3307 if (debug_level >= DEBUG_LEVEL_INFO)
3308 printk("%s(%d):mgsl_hangup(%s)\n",
3309 __FILE__,__LINE__, info->device_name );
3310
3311 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3312 return;
3313
3314 mgsl_flush_buffer(tty);
3315 shutdown(info);
3316
3317 info->count = 0;
3318 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3319 info->tty = NULL;
3320
3321 wake_up_interruptible(&info->open_wait);
3322
3323} /* end of mgsl_hangup() */
3324
3325/* block_til_ready()
3326 *
3327 * Block the current process until the specified port
3328 * is ready to be opened.
3329 *
3330 * Arguments:
3331 *
3332 * tty pointer to tty info structure
3333 * filp pointer to open file object
3334 * info pointer to device instance data
3335 *
3336 * Return Value: 0 if success, otherwise error code
3337 */
3338static int block_til_ready(struct tty_struct *tty, struct file * filp,
3339 struct mgsl_struct *info)
3340{
3341 DECLARE_WAITQUEUE(wait, current);
3342 int retval;
3343 int do_clocal = 0, extra_count = 0;
3344 unsigned long flags;
3345
3346 if (debug_level >= DEBUG_LEVEL_INFO)
3347 printk("%s(%d):block_til_ready on %s\n",
3348 __FILE__,__LINE__, tty->driver->name );
3349
3350 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3351 /* nonblock mode is set or port is not enabled */
3352 info->flags |= ASYNC_NORMAL_ACTIVE;
3353 return 0;
3354 }
3355
3356 if (tty->termios->c_cflag & CLOCAL)
3357 do_clocal = 1;
3358
3359 /* Wait for carrier detect and the line to become
3360 * free (i.e., not in use by the callout). While we are in
3361 * this loop, info->count is dropped by one, so that
3362 * mgsl_close() knows when to free things. We restore it upon
3363 * exit, either normal or abnormal.
3364 */
3365
3366 retval = 0;
3367 add_wait_queue(&info->open_wait, &wait);
3368
3369 if (debug_level >= DEBUG_LEVEL_INFO)
3370 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3371 __FILE__,__LINE__, tty->driver->name, info->count );
3372
3373 spin_lock_irqsave(&info->irq_spinlock, flags);
3374 if (!tty_hung_up_p(filp)) {
3375 extra_count = 1;
3376 info->count--;
3377 }
3378 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3379 info->blocked_open++;
3380
3381 while (1) {
3382 if (tty->termios->c_cflag & CBAUD) {
3383 spin_lock_irqsave(&info->irq_spinlock,flags);
3384 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3385 usc_set_serial_signals(info);
3386 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3387 }
3388
3389 set_current_state(TASK_INTERRUPTIBLE);
3390
3391 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3392 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3393 -EAGAIN : -ERESTARTSYS;
3394 break;
3395 }
3396
3397 spin_lock_irqsave(&info->irq_spinlock,flags);
3398 usc_get_serial_signals(info);
3399 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3400
3401 if (!(info->flags & ASYNC_CLOSING) &&
3402 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3403 break;
3404 }
3405
3406 if (signal_pending(current)) {
3407 retval = -ERESTARTSYS;
3408 break;
3409 }
3410
3411 if (debug_level >= DEBUG_LEVEL_INFO)
3412 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3413 __FILE__,__LINE__, tty->driver->name, info->count );
3414
3415 schedule();
3416 }
3417
3418 set_current_state(TASK_RUNNING);
3419 remove_wait_queue(&info->open_wait, &wait);
3420
3421 if (extra_count)
3422 info->count++;
3423 info->blocked_open--;
3424
3425 if (debug_level >= DEBUG_LEVEL_INFO)
3426 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3427 __FILE__,__LINE__, tty->driver->name, info->count );
3428
3429 if (!retval)
3430 info->flags |= ASYNC_NORMAL_ACTIVE;
3431
3432 return retval;
3433
3434} /* end of block_til_ready() */
3435
3436/* mgsl_open()
3437 *
3438 * Called when a port is opened. Init and enable port.
3439 * Perform serial-specific initialization for the tty structure.
3440 *
3441 * Arguments: tty pointer to tty info structure
3442 * filp associated file pointer
3443 *
3444 * Return Value: 0 if success, otherwise error code
3445 */
3446static int mgsl_open(struct tty_struct *tty, struct file * filp)
3447{
3448 struct mgsl_struct *info;
3449 int retval, line;
3450 unsigned long page;
3451 unsigned long flags;
3452
3453 /* verify range of specified line number */
3454 line = tty->index;
3455 if ((line < 0) || (line >= mgsl_device_count)) {
3456 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3457 __FILE__,__LINE__,line);
3458 return -ENODEV;
3459 }
3460
3461 /* find the info structure for the specified line */
3462 info = mgsl_device_list;
3463 while(info && info->line != line)
3464 info = info->next_device;
3465 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3466 return -ENODEV;
3467
3468 tty->driver_data = info;
3469 info->tty = tty;
3470
3471 if (debug_level >= DEBUG_LEVEL_INFO)
3472 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3473 __FILE__,__LINE__,tty->driver->name, info->count);
3474
3475 /* If port is closing, signal caller to try again */
3476 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3477 if (info->flags & ASYNC_CLOSING)
3478 interruptible_sleep_on(&info->close_wait);
3479 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3480 -EAGAIN : -ERESTARTSYS);
3481 goto cleanup;
3482 }
3483
3484 if (!tmp_buf) {
3485 page = get_zeroed_page(GFP_KERNEL);
3486 if (!page) {
3487 retval = -ENOMEM;
3488 goto cleanup;
3489 }
3490 if (tmp_buf)
3491 free_page(page);
3492 else
3493 tmp_buf = (unsigned char *) page;
3494 }
3495
3496 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3497
3498 spin_lock_irqsave(&info->netlock, flags);
3499 if (info->netcount) {
3500 retval = -EBUSY;
3501 spin_unlock_irqrestore(&info->netlock, flags);
3502 goto cleanup;
3503 }
3504 info->count++;
3505 spin_unlock_irqrestore(&info->netlock, flags);
3506
3507 if (info->count == 1) {
3508 /* 1st open on this device, init hardware */
3509 retval = startup(info);
3510 if (retval < 0)
3511 goto cleanup;
3512 }
3513
3514 retval = block_til_ready(tty, filp, info);
3515 if (retval) {
3516 if (debug_level >= DEBUG_LEVEL_INFO)
3517 printk("%s(%d):block_til_ready(%s) returned %d\n",
3518 __FILE__,__LINE__, info->device_name, retval);
3519 goto cleanup;
3520 }
3521
3522 if (debug_level >= DEBUG_LEVEL_INFO)
3523 printk("%s(%d):mgsl_open(%s) success\n",
3524 __FILE__,__LINE__, info->device_name);
3525 retval = 0;
3526
3527cleanup:
3528 if (retval) {
3529 if (tty->count == 1)
3530 info->tty = NULL; /* tty layer will release tty struct */
3531 if(info->count)
3532 info->count--;
3533 }
3534
3535 return retval;
3536
3537} /* end of mgsl_open() */
3538
3539/*
3540 * /proc fs routines....
3541 */
3542
3543static inline int line_info(char *buf, struct mgsl_struct *info)
3544{
3545 char stat_buf[30];
3546 int ret;
3547 unsigned long flags;
3548
3549 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3550 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3551 info->device_name, info->io_base, info->irq_level,
3552 info->phys_memory_base, info->phys_lcr_base);
3553 } else {
3554 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3555 info->device_name, info->io_base,
3556 info->irq_level, info->dma_level);
3557 }
3558
3559 /* output current serial signal states */
3560 spin_lock_irqsave(&info->irq_spinlock,flags);
3561 usc_get_serial_signals(info);
3562 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3563
3564 stat_buf[0] = 0;
3565 stat_buf[1] = 0;
3566 if (info->serial_signals & SerialSignal_RTS)
3567 strcat(stat_buf, "|RTS");
3568 if (info->serial_signals & SerialSignal_CTS)
3569 strcat(stat_buf, "|CTS");
3570 if (info->serial_signals & SerialSignal_DTR)
3571 strcat(stat_buf, "|DTR");
3572 if (info->serial_signals & SerialSignal_DSR)
3573 strcat(stat_buf, "|DSR");
3574 if (info->serial_signals & SerialSignal_DCD)
3575 strcat(stat_buf, "|CD");
3576 if (info->serial_signals & SerialSignal_RI)
3577 strcat(stat_buf, "|RI");
3578
3579 if (info->params.mode == MGSL_MODE_HDLC ||
3580 info->params.mode == MGSL_MODE_RAW ) {
3581 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3582 info->icount.txok, info->icount.rxok);
3583 if (info->icount.txunder)
3584 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3585 if (info->icount.txabort)
3586 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3587 if (info->icount.rxshort)
3588 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3589 if (info->icount.rxlong)
3590 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3591 if (info->icount.rxover)
3592 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3593 if (info->icount.rxcrc)
3594 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3595 } else {
3596 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3597 info->icount.tx, info->icount.rx);
3598 if (info->icount.frame)
3599 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3600 if (info->icount.parity)
3601 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3602 if (info->icount.brk)
3603 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3604 if (info->icount.overrun)
3605 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3606 }
3607
3608 /* Append serial signal status to end */
3609 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3610
3611 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3612 info->tx_active,info->bh_requested,info->bh_running,
3613 info->pending_bh);
3614
3615 spin_lock_irqsave(&info->irq_spinlock,flags);
3616 {
3617 u16 Tcsr = usc_InReg( info, TCSR );
3618 u16 Tdmr = usc_InDmaReg( info, TDMR );
3619 u16 Ticr = usc_InReg( info, TICR );
3620 u16 Rscr = usc_InReg( info, RCSR );
3621 u16 Rdmr = usc_InDmaReg( info, RDMR );
3622 u16 Ricr = usc_InReg( info, RICR );
3623 u16 Icr = usc_InReg( info, ICR );
3624 u16 Dccr = usc_InReg( info, DCCR );
3625 u16 Tmr = usc_InReg( info, TMR );
3626 u16 Tccr = usc_InReg( info, TCCR );
3627 u16 Ccar = inw( info->io_base + CCAR );
3628 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3629 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3630 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3631 }
3632 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3633
3634 return ret;
3635
3636} /* end of line_info() */
3637
3638/* mgsl_read_proc()
3639 *
3640 * Called to print information about devices
3641 *
3642 * Arguments:
3643 * page page of memory to hold returned info
3644 *	start	where the returned data begins within page (for partial reads)
3645 *	off	byte offset into the virtual file
3646 *	count	maximum number of bytes to return
3647 *	eof	set to 1 when all devices have been reported
3648 *	data	unused callback argument
3649 *
3650 * Return Value: number of bytes written to page
3651 */
3652static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3653 int *eof, void *data)
3654{
3655 int len = 0, l;
3656 off_t begin = 0;
3657 struct mgsl_struct *info;
3658
3659 len += sprintf(page, "synclink driver:%s\n", driver_version);
3660
3661 info = mgsl_device_list;
3662 while( info ) {
3663 l = line_info(page + len, info);
3664 len += l;
3665 if (len+begin > off+count)
3666 goto done;
3667 if (len+begin < off) {
3668 begin += len;
3669 len = 0;
3670 }
3671 info = info->next_device;
3672 }
3673
3674 *eof = 1;
3675done:
3676 if (off >= len+begin)
3677 return 0;
3678 *start = page + (off-begin);
3679 return ((count < begin+len-off) ? count : begin+len-off);
3680
3681} /* end of mgsl_read_proc() */
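/*
 * Note: with the read_proc hook registered in the tty operations below,
 * this output is normally visible as /proc/tty/driver/synclink (one
 * line_info() line per adapter), so `cat /proc/tty/driver/synclink` can
 * be used as a quick health check. The exact /proc path is an assumption
 * based on how the tty layer of this kernel generation exposes read_proc.
 */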
3682
3683/* mgsl_allocate_dma_buffers()
3684 *
3685 * Allocate and format DMA buffers (ISA adapter)
3686 * or format shared memory buffers (PCI adapter).
3687 *
3688 * Arguments: info pointer to device instance data
3689 * Return Value: 0 if success, otherwise error
3690 */
3691static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3692{
3693 unsigned short BuffersPerFrame;
3694
3695 info->last_mem_alloc = 0;
3696
3697 /* Calculate the number of DMA buffers necessary to hold the */
3698 /* largest allowable frame size. Note: If the max frame size is */
3699 /* not an even multiple of the DMA buffer size then we need to */
3700 /* round the buffer count per frame up one. */
3701
3702 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3703 if ( info->max_frame_size % DMABUFFERSIZE )
3704 BuffersPerFrame++;
3705
3706 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3707 /*
3708 * The PCI adapter has 256KBytes of shared memory to use.
3709 * This is 64 PAGE_SIZE buffers.
3710 *
3711 * The first page is used for padding at this time so the
3712 * buffer list does not begin at offset 0 of the PCI
3713 * adapter's shared memory.
3714 *
3715 * The 2nd page is used for the buffer list. A 4K buffer
3716 * list can hold 128 DMA_BUFFER structures at 32 bytes
3717 * each.
3718 *
3719 * This leaves 62 4K pages.
3720 *
3721 * The next N pages are used for transmit frame(s). We
3722 * reserve enough 4K page blocks to hold the required
3723 * number of transmit dma buffers (num_tx_dma_buffers),
3724 * each of MaxFrameSize size.
3725 *
3726 * Of the remaining pages (62-N), determine how many can
3727 * be used to receive full MaxFrameSize inbound frames
3728 */
3729 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3730 info->rx_buffer_count = 62 - info->tx_buffer_count;
3731 } else {
3732 /* Calculate the number of PAGE_SIZE buffers needed for */
3733 /* receive and transmit DMA buffers. */
3734
3735
3736 /* Calculate the number of DMA buffers necessary to */
3737 /* hold 7 max size receive frames and one max size transmit frame. */
3738	/* The receive buffer count is padded with extra entries so we avoid an */
3739	/* End of List condition if all receive buffers are used when */
3740	/* using linked list DMA buffers. */
3741
3742 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3743 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3744
3745 /*
3746	 * limit total TxBuffers & RxBuffers to 62 4K buffers total
3747	 * (matching the PCI allocation)
3748 */
3749
3750 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3751 info->rx_buffer_count = 62 - info->tx_buffer_count;
3752
3753 }
3754
3755 if ( debug_level >= DEBUG_LEVEL_INFO )
3756 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3757 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3758
3759 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3760 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3761 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3762 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3763 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3764 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3765 return -ENOMEM;
3766 }
3767
3768 mgsl_reset_rx_dma_buffers( info );
3769 mgsl_reset_tx_dma_buffers( info );
3770
3771 return 0;
3772
3773} /* end of mgsl_allocate_dma_buffers() */
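/*
 * Worked example of the buffer accounting above (illustrative, assuming
 * DMABUFFERSIZE is one 4K page and the defaults of max_frame_size = 4096
 * and num_tx_dma_buffers = 1 set in mgsl_allocate_device()):
 * BuffersPerFrame = 1, so a PCI adapter gets tx_buffer_count = 1 and
 * rx_buffer_count = 62 - 1 = 61, while an ISA adapter gets
 * rx_buffer_count = (1 * MAXRXFRAMES) + 6 = 13 when MAXRXFRAMES is 7
 * as described in the comment above.
 */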
3774
3775/*
3776 * mgsl_alloc_buffer_list_memory()
3777 *
3778 * Allocate a common DMA buffer for use as the
3779 * receive and transmit buffer lists.
3780 *
3781 * A buffer list is a set of buffer entries where each entry contains
3782 * a pointer to an actual buffer and a pointer to the next buffer entry
3783 * (plus some other info about the buffer).
3784 *
3785 * The buffer entries for a list are built to form a circular list so
3786 * that when the entire list has been traversed you start back at the
3787 * beginning.
3788 *
3789 * This function allocates memory for just the buffer entries.
3790 * The links (pointer to next entry) are filled in with the physical
3791 * address of the next entry so the adapter can navigate the list
3792 * using bus master DMA. The pointers to the actual buffers are filled
3793 * out later when the actual buffers are allocated.
3794 *
3795 * Arguments: info pointer to device instance data
3796 * Return Value: 0 if success, otherwise error
3797 */
3798static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3799{
3800 unsigned int i;
3801
3802 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3803 /* PCI adapter uses shared memory. */
3804 info->buffer_list = info->memory_base + info->last_mem_alloc;
3805 info->buffer_list_phys = info->last_mem_alloc;
3806 info->last_mem_alloc += BUFFERLISTSIZE;
3807 } else {
3808 /* ISA adapter uses system memory. */
3809 /* The buffer lists are allocated as a common buffer that both */
3810 /* the processor and adapter can access. This allows the driver to */
3811 /* inspect portions of the buffer while other portions are being */
3812 /* updated by the adapter using Bus Master DMA. */
3813
3814 info->buffer_list = kmalloc(BUFFERLISTSIZE, GFP_KERNEL | GFP_DMA);
3815 if ( info->buffer_list == NULL )
3816 return -ENOMEM;
3817
3818 info->buffer_list_phys = isa_virt_to_bus(info->buffer_list);
3819 }
3820
3821 /* We got the memory for the buffer entry lists. */
3822 /* Initialize the memory block to all zeros. */
3823 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3824
3825 /* Save virtual address pointers to the receive and */
3826 /* transmit buffer lists. (Receive 1st). These pointers will */
3827 /* be used by the processor to access the lists. */
3828 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3829 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3830 info->tx_buffer_list += info->rx_buffer_count;
3831
3832 /*
3833 * Build the links for the buffer entry lists such that
3834 * two circular lists are built. (Transmit and Receive).
3835 *
3836 * Note: the links are physical addresses
3837 * which are read by the adapter to determine the next
3838 * buffer entry to use.
3839 */
3840
3841 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3842 /* calculate and store physical address of this buffer entry */
3843 info->rx_buffer_list[i].phys_entry =
3844 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3845
3846 /* calculate and store physical address of */
3847		/* next entry in circular list of entries */
3848
3849 info->rx_buffer_list[i].link = info->buffer_list_phys;
3850
3851 if ( i < info->rx_buffer_count - 1 )
3852 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3853 }
3854
3855 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3856 /* calculate and store physical address of this buffer entry */
3857 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3858 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3859
3860 /* calculate and store physical address of */
3861		/* next entry in circular list of entries */
3862
3863 info->tx_buffer_list[i].link = info->buffer_list_phys +
3864 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3865
3866 if ( i < info->tx_buffer_count - 1 )
3867 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3868 }
3869
3870 return 0;
3871
3872} /* end of mgsl_alloc_buffer_list_memory() */
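/*
 * Example of the resulting layout (illustrative): with rx_buffer_count = 2,
 * tx_buffer_count = 2, list base physical address B and entry size S
 * (sizeof(DMABUFFERENTRY), 32 bytes per the PCI comment above), the loops
 * above produce
 *
 *	rx[0].phys_entry = B + 0*S, rx[0].link = B + 1*S
 *	rx[1].phys_entry = B + 1*S, rx[1].link = B        (wraps to rx[0])
 *	tx[0].phys_entry = B + 2*S, tx[0].link = B + 3*S
 *	tx[1].phys_entry = B + 3*S, tx[1].link = B + 2*S  (wraps to tx[0])
 */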
3873
3874/* Free DMA buffers allocated for use as the
3875 * receive and transmit buffer lists.
3876 * Warning:
3877 *
3878 * The data transfer buffers associated with the buffer list
3879 * MUST be freed before freeing the buffer list itself because
3880 * the buffer list contains the information necessary to free
3881 * the individual buffers!
3882 */
3883static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3884{
3885 if ( info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI )
3886 kfree(info->buffer_list);
3887
3888 info->buffer_list = NULL;
3889 info->rx_buffer_list = NULL;
3890 info->tx_buffer_list = NULL;
3891
3892} /* end of mgsl_free_buffer_list_memory() */
3893
3894/*
3895 * mgsl_alloc_frame_memory()
3896 *
3897 * Allocate the frame DMA buffers used by the specified buffer list.
3898 * Each DMA buffer will be one memory page in size. This is necessary
3899 * because memory can fragment enough that it may be impossible to
3900 * allocate larger blocks of contiguous pages.
3901 *
3902 * Arguments:
3903 *
3904 * info pointer to device instance data
3905 * BufferList pointer to list of buffer entries
3906 * Buffercount count of buffer entries in buffer list
3907 *
3908 * Return Value: 0 if success, otherwise -ENOMEM
3909 */
3910static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3911{
3912 int i;
3913 unsigned long phys_addr;
3914
3915 /* Allocate page sized buffers for the receive buffer list */
3916
3917 for ( i = 0; i < Buffercount; i++ ) {
3918 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3919 /* PCI adapter uses shared memory buffers. */
3920 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3921 phys_addr = info->last_mem_alloc;
3922 info->last_mem_alloc += DMABUFFERSIZE;
3923 } else {
3924 /* ISA adapter uses system memory. */
3925 BufferList[i].virt_addr =
3926 kmalloc(DMABUFFERSIZE, GFP_KERNEL | GFP_DMA);
3927 if ( BufferList[i].virt_addr == NULL )
3928 return -ENOMEM;
3929 phys_addr = isa_virt_to_bus(BufferList[i].virt_addr);
3930 }
3931 BufferList[i].phys_addr = phys_addr;
3932 }
3933
3934 return 0;
3935
3936} /* end of mgsl_alloc_frame_memory() */
3937
3938/*
3939 * mgsl_free_frame_memory()
3940 *
3941 * Free the buffers associated with
3942 * each buffer entry of a buffer list.
3943 *
3944 * Arguments:
3945 *
3946 * info pointer to device instance data
3947 * BufferList pointer to list of buffer entries
3948 * Buffercount count of buffer entries in buffer list
3949 *
3950 * Return Value: None
3951 */
3952static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3953{
3954 int i;
3955
3956 if ( BufferList ) {
3957 for ( i = 0 ; i < Buffercount ; i++ ) {
3958 if ( BufferList[i].virt_addr ) {
3959 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3960 kfree(BufferList[i].virt_addr);
3961 BufferList[i].virt_addr = NULL;
3962 }
3963 }
3964 }
3965
3966} /* end of mgsl_free_frame_memory() */
3967
3968/* mgsl_free_dma_buffers()
3969 *
3970 * Free DMA buffers
3971 *
3972 * Arguments: info pointer to device instance data
3973 * Return Value: None
3974 */
3975static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3976{
3977 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3978 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3979 mgsl_free_buffer_list_memory( info );
3980
3981} /* end of mgsl_free_dma_buffers() */
3982
3983
3984/*
3985 * mgsl_alloc_intermediate_rxbuffer_memory()
3986 *
3987 * Allocate a buffer large enough to hold max_frame_size. This buffer
3988 * is used to pass an assembled frame to the line discipline.
3989 *
3990 * Arguments:
3991 *
3992 * info pointer to device instance data
3993 *
3994 * Return Value: 0 if success, otherwise -ENOMEM
3995 */
3996static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3997{
3998 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3999 if ( info->intermediate_rxbuffer == NULL )
4000 return -ENOMEM;
4001
4002 return 0;
4003
4004} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
4005
4006/*
4007 * mgsl_free_intermediate_rxbuffer_memory()
4008 *
4009 *	Free the intermediate receive buffer.
4010 * Arguments:
4011 *
4012 * info pointer to device instance data
4013 *
4014 * Return Value: None
4015 */
4016static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4017{
4018 if ( info->intermediate_rxbuffer )
4019 kfree(info->intermediate_rxbuffer);
4020
4021 info->intermediate_rxbuffer = NULL;
4022
4023} /* end of mgsl_free_intermediate_rxbuffer_memory() */
4024
4025/*
4026 * mgsl_alloc_intermediate_txbuffer_memory()
4027 *
4028 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
4029 * These buffers hold transmit frames until sufficient space is available to
4030 * load them into the adapter's dma transfer buffers.
4031 *
4032 * Arguments:
4033 *
4034 * info pointer to device instance data
4035 *
4036 * Return Value: 0 if success, otherwise -ENOMEM
4037 */
4038static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4039{
4040 int i;
4041
4042 if ( debug_level >= DEBUG_LEVEL_INFO )
4043 printk("%s %s(%d) allocating %d tx holding buffers\n",
4044 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4045
4046 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4047
4048 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4049 info->tx_holding_buffers[i].buffer =
4050 kmalloc(info->max_frame_size, GFP_KERNEL);
4051 if ( info->tx_holding_buffers[i].buffer == NULL )
4052 return -ENOMEM;
4053 }
4054
4055 return 0;
4056
4057} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4058
4059/*
4060 * mgsl_free_intermediate_txbuffer_memory()
4061 *
4062 *	Free the transmit holding buffers and reset the holding buffer state.
4063 * Arguments:
4064 *
4065 * info pointer to device instance data
4066 *
4067 * Return Value: None
4068 */
4069static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4070{
4071 int i;
4072
4073 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4074 if ( info->tx_holding_buffers[i].buffer ) {
4075 kfree(info->tx_holding_buffers[i].buffer);
4076 info->tx_holding_buffers[i].buffer=NULL;
4077 }
4078 }
4079
4080 info->get_tx_holding_index = 0;
4081 info->put_tx_holding_index = 0;
4082 info->tx_holding_count = 0;
4083
4084} /* end of mgsl_free_intermediate_txbuffer_memory() */
4085
4086
4087/*
4088 * load_next_tx_holding_buffer()
4089 *
4090 * attempts to load the next buffered tx request into the
4091 * tx dma buffers
4092 *
4093 * Arguments:
4094 *
4095 * info pointer to device instance data
4096 *
4097 * Return Value: 1 if next buffered tx request loaded
4098 * into adapter's tx dma buffer,
4099 * 0 otherwise
4100 */
4101static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4102{
4103 int ret = 0;
4104
4105 if ( info->tx_holding_count ) {
4106 /* determine if we have enough tx dma buffers
4107 * to accommodate the next tx frame
4108 */
4109 struct tx_holding_buffer *ptx =
4110 &info->tx_holding_buffers[info->get_tx_holding_index];
4111 int num_free = num_free_tx_dma_buffers(info);
4112 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4113 if ( ptx->buffer_size % DMABUFFERSIZE )
4114 ++num_needed;
4115
4116 if (num_needed <= num_free) {
4117 info->xmit_cnt = ptx->buffer_size;
4118 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4119
4120 --info->tx_holding_count;
4121 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4122 info->get_tx_holding_index=0;
4123
4124 /* restart transmit timer */
4125 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4126
4127 ret = 1;
4128 }
4129 }
4130
4131 return ret;
4132}
4133
4134/*
4135 * save_tx_buffer_request()
4136 *
4137 * attempt to store transmit frame request for later transmission
4138 *
4139 * Arguments:
4140 *
4141 * info pointer to device instance data
4142 * Buffer pointer to buffer containing frame to load
4143 * BufferSize size in bytes of frame in Buffer
4144 *
4145 * Return Value: 1 if able to store, 0 otherwise
4146 */
4147static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4148{
4149 struct tx_holding_buffer *ptx;
4150
4151 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4152 return 0; /* all buffers in use */
4153 }
4154
4155 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4156 ptx->buffer_size = BufferSize;
4157 memcpy( ptx->buffer, Buffer, BufferSize);
4158
4159 ++info->tx_holding_count;
4160 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4161 info->put_tx_holding_index=0;
4162
4163 return 1;
4164}
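/*
 * Example of the holding buffer ring (illustrative): with
 * num_tx_holding_buffers = 3, three saved frames land at put indices
 * 0, 1 and 2 (put_tx_holding_index then wrapping back to 0) and
 * tx_holding_count reaches 3; load_next_tx_holding_buffer() later drains
 * them in the same order from get_tx_holding_index as transmit DMA buffer
 * space becomes available.
 */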
4165
4166static int mgsl_claim_resources(struct mgsl_struct *info)
4167{
4168 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4169 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4170 __FILE__,__LINE__,info->device_name, info->io_base);
4171 return -ENODEV;
4172 }
4173 info->io_addr_requested = 1;
4174
4175 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4176 info->device_name, info ) < 0 ) {
4177 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4178 __FILE__,__LINE__,info->device_name, info->irq_level );
4179 goto errout;
4180 }
4181 info->irq_requested = 1;
4182
4183 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4184 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4185 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4186 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4187 goto errout;
4188 }
4189 info->shared_mem_requested = 1;
4190 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4191 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4192 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4193 goto errout;
4194 }
4195 info->lcr_mem_requested = 1;
4196
4197 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4198 if (!info->memory_base) {
4199 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4200 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4201 goto errout;
4202 }
4203
4204 if ( !mgsl_memory_test(info) ) {
4205 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4206 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4207 goto errout;
4208 }
4209
4210		info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE);
4211		if (!info->lcr_base) {
4212			printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4213				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4214			goto errout;
4215		}
4216		info->lcr_base += info->lcr_offset;
4217 } else {
4218 /* claim DMA channel */
4219
4220 if (request_dma(info->dma_level,info->device_name) < 0){
4221 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4222 __FILE__,__LINE__,info->device_name, info->dma_level );
4223 mgsl_release_resources( info );
4224 return -ENODEV;
4225 }
4226 info->dma_requested = 1;
4227
4228 /* ISA adapter uses bus master DMA */
4229 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4230 enable_dma(info->dma_level);
4231 }
4232
4233 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4234 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4235 __FILE__,__LINE__,info->device_name, info->dma_level );
4236 goto errout;
4237 }
4238
4239 return 0;
4240errout:
4241 mgsl_release_resources(info);
4242 return -ENODEV;
4243
4244} /* end of mgsl_claim_resources() */
4245
4246static void mgsl_release_resources(struct mgsl_struct *info)
4247{
4248 if ( debug_level >= DEBUG_LEVEL_INFO )
4249 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4250 __FILE__,__LINE__,info->device_name );
4251
4252 if ( info->irq_requested ) {
4253 free_irq(info->irq_level, info);
4254 info->irq_requested = 0;
4255 }
4256 if ( info->dma_requested ) {
4257 disable_dma(info->dma_level);
4258 free_dma(info->dma_level);
4259 info->dma_requested = 0;
4260 }
4261 mgsl_free_dma_buffers(info);
4262 mgsl_free_intermediate_rxbuffer_memory(info);
4263 mgsl_free_intermediate_txbuffer_memory(info);
4264
4265 if ( info->io_addr_requested ) {
4266 release_region(info->io_base,info->io_addr_size);
4267 info->io_addr_requested = 0;
4268 }
4269 if ( info->shared_mem_requested ) {
4270 release_mem_region(info->phys_memory_base,0x40000);
4271 info->shared_mem_requested = 0;
4272 }
4273 if ( info->lcr_mem_requested ) {
4274 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4275 info->lcr_mem_requested = 0;
4276 }
4277 if (info->memory_base){
4278 iounmap(info->memory_base);
4279 info->memory_base = NULL;
4280 }
4281 if (info->lcr_base){
4282 iounmap(info->lcr_base - info->lcr_offset);
4283 info->lcr_base = NULL;
4284 }
4285
4286 if ( debug_level >= DEBUG_LEVEL_INFO )
4287 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4288 __FILE__,__LINE__,info->device_name );
4289
4290} /* end of mgsl_release_resources() */
4291
4292/* mgsl_add_device()
4293 *
4294 * Add the specified device instance data structure to the
4295 * global linked list of devices and increment the device count.
4296 *
4297 * Arguments: info pointer to device instance data
4298 * Return Value: None
4299 */
4300static void mgsl_add_device( struct mgsl_struct *info )
4301{
4302 info->next_device = NULL;
4303 info->line = mgsl_device_count;
4304 sprintf(info->device_name,"ttySL%d",info->line);
4305
4306 if (info->line < MAX_TOTAL_DEVICES) {
4307 if (maxframe[info->line])
4308 info->max_frame_size = maxframe[info->line];
4309 info->dosyncppp = dosyncppp[info->line];
4310
4311 if (txdmabufs[info->line]) {
4312 info->num_tx_dma_buffers = txdmabufs[info->line];
4313 if (info->num_tx_dma_buffers < 1)
4314 info->num_tx_dma_buffers = 1;
4315 }
4316
4317 if (txholdbufs[info->line]) {
4318 info->num_tx_holding_buffers = txholdbufs[info->line];
4319 if (info->num_tx_holding_buffers < 1)
4320 info->num_tx_holding_buffers = 1;
4321 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4322 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4323 }
4324 }
4325
4326 mgsl_device_count++;
4327
4328 if ( !mgsl_device_list )
4329 mgsl_device_list = info;
4330 else {
4331 struct mgsl_struct *current_dev = mgsl_device_list;
4332 while( current_dev->next_device )
4333 current_dev = current_dev->next_device;
4334 current_dev->next_device = info;
4335 }
4336
4337 if ( info->max_frame_size < 4096 )
4338 info->max_frame_size = 4096;
4339 else if ( info->max_frame_size > 65535 )
4340 info->max_frame_size = 65535;
4341
4342 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4343 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4344 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4345 info->phys_memory_base, info->phys_lcr_base,
4346 info->max_frame_size );
4347 } else {
4348 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4349 info->device_name, info->io_base, info->irq_level, info->dma_level,
4350 info->max_frame_size );
4351 }
4352
4353#ifdef CONFIG_HDLC
4354 hdlcdev_init(info);
4355#endif
4356
4357} /* end of mgsl_add_device() */
4358
4359/* mgsl_allocate_device()
4360 *
4361 * Allocate and initialize a device instance structure
4362 *
4363 * Arguments: none
4364 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4365 */
4366static struct mgsl_struct* mgsl_allocate_device(void)
4367{
4368 struct mgsl_struct *info;
4369
4370 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4371 GFP_KERNEL);
4372
4373 if (!info) {
4374 printk("Error can't allocate device instance data\n");
4375 } else {
4376 memset(info, 0, sizeof(struct mgsl_struct));
4377 info->magic = MGSL_MAGIC;
4378 INIT_WORK(&info->task, mgsl_bh_handler, info);
4379 info->max_frame_size = 4096;
4380 info->close_delay = 5*HZ/10;
4381 info->closing_wait = 30*HZ;
4382 init_waitqueue_head(&info->open_wait);
4383 init_waitqueue_head(&info->close_wait);
4384 init_waitqueue_head(&info->status_event_wait_q);
4385 init_waitqueue_head(&info->event_wait_q);
4386 spin_lock_init(&info->irq_spinlock);
4387 spin_lock_init(&info->netlock);
4388 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4389 info->idle_mode = HDLC_TXIDLE_FLAGS;
4390 info->num_tx_dma_buffers = 1;
4391 info->num_tx_holding_buffers = 0;
4392 }
4393
4394 return info;
4395
4396} /* end of mgsl_allocate_device()*/
4397
4398static struct tty_operations mgsl_ops = {
4399 .open = mgsl_open,
4400 .close = mgsl_close,
4401 .write = mgsl_write,
4402 .put_char = mgsl_put_char,
4403 .flush_chars = mgsl_flush_chars,
4404 .write_room = mgsl_write_room,
4405 .chars_in_buffer = mgsl_chars_in_buffer,
4406 .flush_buffer = mgsl_flush_buffer,
4407 .ioctl = mgsl_ioctl,
4408 .throttle = mgsl_throttle,
4409 .unthrottle = mgsl_unthrottle,
4410 .send_xchar = mgsl_send_xchar,
4411 .break_ctl = mgsl_break,
4412 .wait_until_sent = mgsl_wait_until_sent,
4413 .read_proc = mgsl_read_proc,
4414 .set_termios = mgsl_set_termios,
4415 .stop = mgsl_stop,
4416 .start = mgsl_start,
4417 .hangup = mgsl_hangup,
4418 .tiocmget = tiocmget,
4419 .tiocmset = tiocmset,
4420};
4421
4422/*
4423 * perform tty device initialization
4424 */
4425static int mgsl_init_tty(void)
4426{
4427 int rc;
4428
4429 serial_driver = alloc_tty_driver(128);
4430 if (!serial_driver)
4431 return -ENOMEM;
4432
4433 serial_driver->owner = THIS_MODULE;
4434 serial_driver->driver_name = "synclink";
4435 serial_driver->name = "ttySL";
4436 serial_driver->major = ttymajor;
4437 serial_driver->minor_start = 64;
4438 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4439 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4440 serial_driver->init_termios = tty_std_termios;
4441 serial_driver->init_termios.c_cflag =
4442 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4443 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4444 tty_set_operations(serial_driver, &mgsl_ops);
4445 if ((rc = tty_register_driver(serial_driver)) < 0) {
4446 printk("%s(%d):Couldn't register serial driver\n",
4447 __FILE__,__LINE__);
4448 put_tty_driver(serial_driver);
4449 serial_driver = NULL;
4450 return rc;
4451 }
4452
4453 printk("%s %s, tty major#%d\n",
4454 driver_name, driver_version,
4455 serial_driver->major);
4456 return 0;
4457}
4458
4459/* enumerate user specified ISA adapters
4460 */
4461static void mgsl_enum_isa_devices(void)
4462{
4463 struct mgsl_struct *info;
4464 int i;
4465
4466 /* Check for user specified ISA devices */
4467
4468 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4469 if ( debug_level >= DEBUG_LEVEL_INFO )
4470 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4471 io[i], irq[i], dma[i] );
4472
4473 info = mgsl_allocate_device();
4474 if ( !info ) {
4475 /* error allocating device instance data */
4476 if ( debug_level >= DEBUG_LEVEL_ERROR )
4477 printk( "can't allocate device instance data.\n");
4478 continue;
4479 }
4480
4481 /* Copy user configuration info to device instance data */
4482 info->io_base = (unsigned int)io[i];
4483 info->irq_level = (unsigned int)irq[i];
4484 info->irq_level = irq_canonicalize(info->irq_level);
4485 info->dma_level = (unsigned int)dma[i];
4486 info->bus_type = MGSL_BUS_TYPE_ISA;
4487 info->io_addr_size = 16;
4488 info->irq_flags = 0;
4489
4490 mgsl_add_device( info );
4491 }
4492}
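/*
 * For example, assuming the io/irq/dma module parameter arrays declared
 * earlier in this file, one ISA adapter could be specified at load time
 * with something like:
 *
 *	modprobe synclink io=0x300 irq=10 dma=7
 */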
4493
4494static void synclink_cleanup(void)
4495{
4496 int rc;
4497 struct mgsl_struct *info;
4498 struct mgsl_struct *tmp;
4499
4500 printk("Unloading %s: %s\n", driver_name, driver_version);
4501
4502 if (serial_driver) {
4503 if ((rc = tty_unregister_driver(serial_driver)))
4504 printk("%s(%d) failed to unregister tty driver err=%d\n",
4505 __FILE__,__LINE__,rc);
4506 put_tty_driver(serial_driver);
4507 }
4508
4509 info = mgsl_device_list;
4510 while(info) {
4511#ifdef CONFIG_HDLC
4512 hdlcdev_exit(info);
4513#endif
4514 mgsl_release_resources(info);
4515 tmp = info;
4516 info = info->next_device;
4517 kfree(tmp);
4518 }
4519
4520 if (tmp_buf) {
4521 free_page((unsigned long) tmp_buf);
4522 tmp_buf = NULL;
4523 }
4524
4525 if (pci_registered)
4526 pci_unregister_driver(&synclink_pci_driver);
4527}
4528
4529static int __init synclink_init(void)
4530{
4531 int rc;
4532
4533 if (break_on_load) {
4534 mgsl_get_text_ptr();
4535 BREAKPOINT();
4536 }
4537
4538 printk("%s %s\n", driver_name, driver_version);
4539
4540 mgsl_enum_isa_devices();
4541 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4542 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4543 else
4544 pci_registered = 1;
4545
4546 if ((rc = mgsl_init_tty()) < 0)
4547 goto error;
4548
4549 return 0;
4550
4551error:
4552 synclink_cleanup();
4553 return rc;
4554}
4555
4556static void __exit synclink_exit(void)
4557{
4558 synclink_cleanup();
4559}
4560
4561module_init(synclink_init);
4562module_exit(synclink_exit);
4563
4564/*
4565 * usc_RTCmd()
4566 *
4567 * Issue a USC Receive/Transmit command to the
4568 * Channel Command/Address Register (CCAR).
4569 *
4570 * Notes:
4571 *
4572 * The command is encoded in the most significant 5 bits <15..11>
4573 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4574 * and Bits <6..0> must be written as zeros.
4575 *
4576 * Arguments:
4577 *
4578 * info pointer to device information structure
4579 * Cmd command mask (use symbolic macros)
4580 *
4581 * Return Value:
4582 *
4583 * None
4584 */
4585static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4586{
4587 /* output command to CCAR in bits <15..11> */
4588 /* preserve bits <10..7>, bits <6..0> must be zero */
4589
4590 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4591
4592 /* Read to flush write to CCAR */
4593 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4594 inw( info->io_base + CCAR );
4595
4596} /* end of usc_RTCmd() */
4597
4598/*
4599 * usc_DmaCmd()
4600 *
4601 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4602 *
4603 * Arguments:
4604 *
4605 * info pointer to device information structure
4606 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4607 *
4608 * Return Value:
4609 *
4610 * None
4611 */
4612static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4613{
4614 /* write command mask to DCAR */
4615 outw( Cmd + info->mbre_bit, info->io_base );
4616
4617 /* Read to flush write to DCAR */
4618 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4619 inw( info->io_base );
4620
4621} /* end of usc_DmaCmd() */
4622
4623/*
4624 * usc_OutDmaReg()
4625 *
4626 * Write a 16-bit value to a USC DMA register
4627 *
4628 * Arguments:
4629 *
4630 * info pointer to device info structure
4631 * RegAddr register address (number) for write
4632 * RegValue 16-bit value to write to register
4633 *
4634 * Return Value:
4635 *
4636 * None
4637 *
4638 */
4639static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4640{
4641 /* Note: The DCAR is located at the adapter base address */
4642 /* Note: must preserve state of BIT8 in DCAR */
4643
4644 outw( RegAddr + info->mbre_bit, info->io_base );
4645 outw( RegValue, info->io_base );
4646
4647 /* Read to flush write to DCAR */
4648 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4649 inw( info->io_base );
4650
4651} /* end of usc_OutDmaReg() */
4652
4653/*
4654 * usc_InDmaReg()
4655 *
4656 * Read a 16-bit value from a DMA register
4657 *
4658 * Arguments:
4659 *
4660 * info pointer to device info structure
4661 * RegAddr register address (number) to read from
4662 *
4663 * Return Value:
4664 *
4665 * The 16-bit value read from register
4666 *
4667 */
4668static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4669{
4670 /* Note: The DCAR is located at the adapter base address */
4671 /* Note: must preserve state of BIT8 in DCAR */
4672
4673 outw( RegAddr + info->mbre_bit, info->io_base );
4674 return inw( info->io_base );
4675
4676} /* end of usc_InDmaReg() */
4677
4678/*
4679 *
4680 * usc_OutReg()
4681 *
4682 * Write a 16-bit value to a USC serial channel register
4683 *
4684 * Arguments:
4685 *
4686 * info pointer to device info structure
4687 * RegAddr register address (number) to write to
4688 * RegValue 16-bit value to write to register
4689 *
4690 * Return Value:
4691 *
4692 * None
4693 *
4694 */
4695static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4696{
4697 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4698 outw( RegValue, info->io_base + CCAR );
4699
4700 /* Read to flush write to CCAR */
4701 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4702 inw( info->io_base + CCAR );
4703
4704} /* end of usc_OutReg() */
4705
4706/*
4707 * usc_InReg()
4708 *
4709 * Reads a 16-bit value from a USC serial channel register
4710 *
4711 * Arguments:
4712 *
4713 * info pointer to device extension
4714 * RegAddr register address (number) to read from
4715 *
4716 * Return Value:
4717 *
4718 * 16-bit value read from register
4719 */
4720static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4721{
4722 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4723 return inw( info->io_base + CCAR );
4724
4725} /* end of usc_InReg() */
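/*
 * The accessors above are frequently combined into read-modify-write
 * sequences elsewhere in this driver (see usc_set_sdlc_mode() below for
 * examples). A minimal sketch of such a helper, shown for illustration
 * only and not used by the driver (usc_SetBitsInReg is a hypothetical
 * name), would look like:
 */
#if 0	/* illustrative sketch only */
static void usc_SetBitsInReg( struct mgsl_struct *info, u16 RegAddr, u16 Bits )
{
	/* read the current register value, OR in the requested bits, write it back */
	usc_OutReg( info, RegAddr, (u16)(usc_InReg( info, RegAddr ) | Bits) );
}
#endif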
4726
4727/* usc_set_sdlc_mode()
4728 *
4729 * Set up the adapter for SDLC DMA communications.
4730 *
4731 * Arguments: info pointer to device instance data
4732 * Return Value: NONE
4733 */
4734static void usc_set_sdlc_mode( struct mgsl_struct *info )
4735{
4736 u16 RegValue;
4737 int PreSL1660;
4738
4739 /*
4740 * determine if the IUSC on the adapter is pre-SL1660. If
4741 * not, take advantage of the UnderWait feature of more
4742 * modern chips. If an underrun occurs and this bit is set,
4743 * the transmitter will idle the programmed idle pattern
4744 * until the driver has time to service the underrun. Otherwise,
4745 * the dma controller may get the cycles previously requested
4746 * and begin transmitting queued tx data.
4747 */
4748 usc_OutReg(info,TMCR,0x1f);
4749 RegValue=usc_InReg(info,TMDR);
4750 if ( RegValue == IUSC_PRE_SL1660 )
4751 PreSL1660 = 1;
4752 else
4753 PreSL1660 = 0;
4754
4755
4756 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4757 {
4758 /*
4759 ** Channel Mode Register (CMR)
4760 **
4761 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4762 ** <13> 0 0 = Transmit Disabled (initially)
4763 ** <12> 0 1 = Consecutive Idles share common 0
4764 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4765 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4766 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4767 **
4768 ** 1000 1110 0000 0110 = 0x8e06
4769 */
4770 RegValue = 0x8e06;
4771
4772 /*--------------------------------------------------
4773 * ignore user options for UnderRun Actions and
4774 * preambles
4775 *--------------------------------------------------*/
4776 }
4777 else
4778 {
4779 /* Channel mode Register (CMR)
4780 *
4781 * <15..14> 00 Tx Sub modes, Underrun Action
4782 * <13> 0 1 = Send Preamble before opening flag
4783 * <12> 0 1 = Consecutive Idles share common 0
4784 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4785 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4786 * <3..0> 0110 Receiver mode = HDLC/SDLC
4787 *
4788 * 0000 0110 0000 0110 = 0x0606
4789 */
4790 if (info->params.mode == MGSL_MODE_RAW) {
4791 RegValue = 0x0001; /* Set Receive mode = external sync */
4792
4793 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4794 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4795
4796 /*
4797 * TxSubMode:
4798 * CMR <15> 0 Don't send CRC on Tx Underrun
4799 * CMR <14> x undefined
4800			 *	CMR <13>	0	Send preamble before opening sync
4801 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4802 *
4803 * TxMode:
4804			 *	CMR <11..8>	0100	MonoSync
4805			 *
4806			 *	0x00 0100 xxxx xxxx = 0x04xx
4807 */
4808 RegValue |= 0x0400;
4809 }
4810 else {
4811
4812 RegValue = 0x0606;
4813
4814 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4815 RegValue |= BIT14;
4816 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4817 RegValue |= BIT15;
4818 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4819 RegValue |= BIT15 + BIT14;
4820 }
4821
4822 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4823 RegValue |= BIT13;
4824 }
4825
4826 if ( info->params.mode == MGSL_MODE_HDLC &&
4827 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4828 RegValue |= BIT12;
4829
4830 if ( info->params.addr_filter != 0xff )
4831 {
4832 /* set up receive address filtering */
4833 usc_OutReg( info, RSR, info->params.addr_filter );
4834 RegValue |= BIT4;
4835 }
4836
4837 usc_OutReg( info, CMR, RegValue );
4838 info->cmr_value = RegValue;
4839
4840 /* Receiver mode Register (RMR)
4841 *
4842 * <15..13> 000 encoding
4843	 * <12..11>	00	FCS = 16bit CRC CCITT (x16 + x12 + x5 + 1)
4844 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4845 * <9> 0 1 = Include Receive chars in CRC
4846 * <8> 1 1 = Use Abort/PE bit as abort indicator
4847 * <7..6> 00 Even parity
4848 * <5> 0 parity disabled
4849 * <4..2> 000 Receive Char Length = 8 bits
4850 * <1..0> 00 Disable Receiver
4851 *
4852 * 0000 0101 0000 0000 = 0x0500
4853 */
4854
4855 RegValue = 0x0500;
4856
4857 switch ( info->params.encoding ) {
4858 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4859 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4860 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4861 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4862 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4863 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4864 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4865 }
4866
4867 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4868 RegValue |= BIT9;
4869 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4870 RegValue |= ( BIT12 | BIT10 | BIT9 );
4871
4872 usc_OutReg( info, RMR, RegValue );
4873
4874 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4875 /* When an opening flag of an SDLC frame is recognized the */
4876 /* Receive Character count (RCC) is loaded with the value in */
4877 /* RCLR. The RCC is decremented for each received byte. The */
4878 /* value of RCC is stored after the closing flag of the frame */
4879 /* allowing the frame size to be computed. */
4880
4881 usc_OutReg( info, RCLR, RCLRVALUE );
4882
4883 usc_RCmd( info, RCmd_SelectRicrdma_level );
4884
4885 /* Receive Interrupt Control Register (RICR)
4886 *
4887 * <15..8> ? RxFIFO DMA Request Level
4888 * <7> 0 Exited Hunt IA (Interrupt Arm)
4889 * <6> 0 Idle Received IA
4890 * <5> 0 Break/Abort IA
4891 * <4> 0 Rx Bound IA
4892 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4893 * <2> 0 Abort/PE IA
4894 * <1> 1 Rx Overrun IA
4895 * <0> 0 Select TC0 value for readback
4896 *
4897	 * 0000 0000 0000 1010 = 0x000a
4898 */
4899
4900 /* Carry over the Exit Hunt and Idle Received bits */
4901 /* in case they have been armed by usc_ArmEvents. */
4902
4903 RegValue = usc_InReg( info, RICR ) & 0xc0;
4904
4905 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4906 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4907 else
4908 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4909
4910 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4911
4912 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4913 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4914
4915 /* Transmit mode Register (TMR)
4916 *
4917 * <15..13> 000 encoding
4918	 * <12..11>	00	FCS = 16bit CRC CCITT (x16 + x12 + x5 + 1)
4919 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4920 * <9> 0 1 = Tx CRC Enabled
4921 * <8> 0 1 = Append CRC to end of transmit frame
4922 * <7..6> 00 Transmit parity Even
4923 * <5> 0 Transmit parity Disabled
4924 * <4..2> 000 Tx Char Length = 8 bits
4925 * <1..0> 00 Disable Transmitter
4926 *
4927 * 0000 0100 0000 0000 = 0x0400
4928 */
4929
4930 RegValue = 0x0400;
4931
4932 switch ( info->params.encoding ) {
4933 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4934 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4935 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4936 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4937 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4938 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4939 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4940 }
4941
4942 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4943 RegValue |= BIT9 + BIT8;
4944 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4945 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4946
4947 usc_OutReg( info, TMR, RegValue );
4948
4949 usc_set_txidle( info );
4950
4951
4952 usc_TCmd( info, TCmd_SelectTicrdma_level );
4953
4954 /* Transmit Interrupt Control Register (TICR)
4955 *
4956 * <15..8> ? Transmit FIFO DMA Level
4957 * <7> 0 Present IA (Interrupt Arm)
4958 * <6> 0 Idle Sent IA
4959 * <5> 1 Abort Sent IA
4960 * <4> 1 EOF/EOM Sent IA
4961 * <3> 0 CRC Sent IA
4962 * <2> 1 1 = Wait for SW Trigger to Start Frame
4963 * <1> 1 Tx Underrun IA
4964 * <0> 0 TC0 constant on read back
4965 *
4966 * 0000 0000 0011 0110 = 0x0036
4967 */
4968
4969 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4970 usc_OutReg( info, TICR, 0x0736 );
4971 else
4972 usc_OutReg( info, TICR, 0x1436 );
4973
4974 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4975 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4976
4977 /*
4978 ** Transmit Command/Status Register (TCSR)
4979 **
4980 ** <15..12> 0000 TCmd
4981 ** <11> 0/1 UnderWait
4982 ** <10..08> 000 TxIdle
4983 ** <7> x PreSent
4984 ** <6> x IdleSent
4985 ** <5> x AbortSent
4986 ** <4> x EOF/EOM Sent
4987 ** <3> x CRC Sent
4988 ** <2> x All Sent
4989 ** <1> x TxUnder
4990 ** <0> x TxEmpty
4991 **
4992 ** 0000 0000 0000 0000 = 0x0000
4993 */
4994 info->tcsr_value = 0;
4995
4996 if ( !PreSL1660 )
4997 info->tcsr_value |= TCSR_UNDERWAIT;
4998
4999 usc_OutReg( info, TCSR, info->tcsr_value );
5000
5001 /* Clock mode Control Register (CMCR)
5002 *
5003 * <15..14> 00 counter 1 Source = Disabled
5004 * <13..12> 00 counter 0 Source = Disabled
5005 * <11..10> 11 BRG1 Input is TxC Pin
5006 * <9..8> 11 BRG0 Input is TxC Pin
5007 * <7..6> 01 DPLL Input is BRG1 Output
5008 * <5..3> XXX TxCLK comes from Port 0
5009 * <2..0> XXX RxCLK comes from Port 1
5010 *
5011 * 0000 1111 0111 0111 = 0x0f77
5012 */
5013
5014 RegValue = 0x0f40;
5015
5016 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
5017 RegValue |= 0x0003; /* RxCLK from DPLL */
5018 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
5019 RegValue |= 0x0004; /* RxCLK from BRG0 */
5020 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
5021 RegValue |= 0x0006; /* RxCLK from TXC Input */
5022 else
5023 RegValue |= 0x0007; /* RxCLK from Port1 */
5024
5025 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
5026 RegValue |= 0x0018; /* TxCLK from DPLL */
5027 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
5028 RegValue |= 0x0020; /* TxCLK from BRG0 */
5029 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
5030		RegValue |= 0x0038;	/* TxCLK from RxC Input */
5031 else
5032 RegValue |= 0x0030; /* TxCLK from Port0 */
5033
5034 usc_OutReg( info, CMCR, RegValue );
5035
5036
5037 /* Hardware Configuration Register (HCR)
5038 *
5039 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5040 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5041 * <12> 0 CVOK:0=report code violation in biphase
5042 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5043 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5044 * <7..6> 00 reserved
5045 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5046 * <4> X BRG1 Enable
5047 * <3..2> 00 reserved
5048 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5049 * <0> 0 BRG0 Enable
5050 */
5051
5052 RegValue = 0x0000;
5053
5054 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5055 u32 XtalSpeed;
5056 u32 DpllDivisor;
5057 u16 Tc;
5058
5059 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5060 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5061
5062 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5063 XtalSpeed = 11059200;
5064 else
5065 XtalSpeed = 14745600;
5066
5067 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5068 DpllDivisor = 16;
5069 RegValue |= BIT10;
5070 }
5071 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5072 DpllDivisor = 8;
5073 RegValue |= BIT11;
5074 }
5075 else
5076 DpllDivisor = 32;
5077
5078 /* Tc = (Xtal/Speed) - 1 */
5079 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5080 /* then rounding up gives a more precise time constant. Instead */
5081 /* of rounding up and then subtracting 1 we just don't subtract */
5082 /* the one in this case. */
5083
5084 /*--------------------------------------------------
5085 * ejz: for DPLL mode, application should use the
5086 * same clock speed as the partner system, even
5087 * though clocking is derived from the input RxData.
5088		 * In case the user specifies a clock speed of 0,
5089		 * default to the maximum 16-bit time constant (0xffff)
5090		 * and don't try to divide by zero.
5091 *--------------------------------------------------*/
5092 if ( info->params.clock_speed )
5093 {
5094 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5095 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5096 / info->params.clock_speed) )
5097 Tc--;
5098 }
5099 else
5100 Tc = -1;
5101
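		/*
		 * Worked example of the time constant calculation above
		 * (illustrative values): with the ISA crystal of 14745600 Hz,
		 * DpllDivisor of 16 and clock_speed of 9600,
		 * XtalSpeed/DpllDivisor = 921600 and 921600/9600 = 96 with no
		 * remainder, so Tc is decremented to 95.
		 */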
5102
5103 /* Write 16-bit Time Constant for BRG1 */
5104 usc_OutReg( info, TC1R, Tc );
5105
5106 RegValue |= BIT4; /* enable BRG1 */
5107
5108 switch ( info->params.encoding ) {
5109 case HDLC_ENCODING_NRZ:
5110 case HDLC_ENCODING_NRZB:
5111 case HDLC_ENCODING_NRZI_MARK:
5112 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5113 case HDLC_ENCODING_BIPHASE_MARK:
5114 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5115 case HDLC_ENCODING_BIPHASE_LEVEL:
5116 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5117 }
5118 }
5119
5120 usc_OutReg( info, HCR, RegValue );
5121
5122
5123 /* Channel Control/status Register (CCSR)
5124 *
5125 * <15> X RCC FIFO Overflow status (RO)
5126 * <14> X RCC FIFO Not Empty status (RO)
5127 * <13> 0 1 = Clear RCC FIFO (WO)
5128 * <12> X DPLL Sync (RW)
5129 * <11> X DPLL 2 Missed Clocks status (RO)
5130 * <10> X DPLL 1 Missed Clock status (RO)
5131 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5132 * <7> X SDLC Loop On status (RO)
5133 * <6> X SDLC Loop Send status (RO)
5134 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5135 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5136 * <1..0> 00 reserved
5137 *
5138 * 0000 0000 0010 0000 = 0x0020
5139 */
5140
5141 usc_OutReg( info, CCSR, 0x1020 );
5142
5143
5144 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5145 usc_OutReg( info, SICR,
5146 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5147 }
5148
5149
5150 /* enable Master Interrupt Enable bit (MIE) */
5151 usc_EnableMasterIrqBit( info );
5152
5153 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5154 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5155
5156 /* arm RCC underflow interrupt */
5157 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5158 usc_EnableInterrupts(info, MISC);
5159
5160 info->mbre_bit = 0;
5161 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5162 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5163 info->mbre_bit = BIT8;
5164 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5165
5166 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5167 /* Enable DMAEN (Port 7, Bit 14) */
5168 /* This connects the DMA request signal to the ISA bus */
5169 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5170 }
5171
5172 /* DMA Control Register (DCR)
5173 *
5174 * <15..14> 10 Priority mode = Alternating Tx/Rx
5175 * 01 Rx has priority
5176 * 00 Tx has priority
5177 *
5178 * <13> 1 Enable Priority Preempt per DCR<15..14>
5179 * (WARNING DCR<11..10> must be 00 when this is 1)
5180 * 0 Choose activate channel per DCR<11..10>
5181 *
5182 * <12> 0 Little Endian for Array/List
5183 * <11..10> 00 Both Channels can use each bus grant
5184 * <9..6> 0000 reserved
5185 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5186 * <4> 0 1 = drive D/C and S/D pins
5187 * <3> 1 1 = Add one wait state to all DMA cycles.
5188 * <2> 0 1 = Strobe /UAS on every transfer.
5189 * <1..0> 11 Addr incrementing only affects LS24 bits
5190 *
5191 * 0110 0000 0000 1011 = 0x600b
5192 */
5193
5194 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5195 /* PCI adapter does not need DMA wait state */
5196 usc_OutDmaReg( info, DCR, 0xa00b );
5197 }
5198 else
5199 usc_OutDmaReg( info, DCR, 0x800b );
5200
5201
5202 /* Receive DMA mode Register (RDMR)
5203 *
5204 * <15..14> 11 DMA mode = Linked List Buffer mode
5205	 *  <13>	1	RSBinA/L = store Rx status Block in Array/List entry
5206 * <12> 1 Clear count of List Entry after fetching
5207 * <11..10> 00 Address mode = Increment
5208 * <9> 1 Terminate Buffer on RxBound
5209 * <8> 0 Bus Width = 16bits
5210 * <7..0> ? status Bits (write as 0s)
5211 *
5212 * 1111 0010 0000 0000 = 0xf200
5213 */
5214
5215 usc_OutDmaReg( info, RDMR, 0xf200 );
5216
5217
5218 /* Transmit DMA mode Register (TDMR)
5219 *
5220 * <15..14> 11 DMA mode = Linked List Buffer mode
5221 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5222 * <12> 1 Clear count of List Entry after fetching
5223 * <11..10> 00 Address mode = Increment
5224 * <9> 1 Terminate Buffer on end of frame
5225 * <8> 0 Bus Width = 16bits
5226 * <7..0> ? status Bits (Read Only so write as 0)
5227 *
5228 * 1111 0010 0000 0000 = 0xf200
5229 */
5230
5231 usc_OutDmaReg( info, TDMR, 0xf200 );
5232
5233
5234 /* DMA Interrupt Control Register (DICR)
5235 *
5236 * <15> 1 DMA Interrupt Enable
5237 * <14> 0 1 = Disable IEO from USC
5238 * <13> 0 1 = Don't provide vector during IntAck
5239 * <12> 1 1 = Include status in Vector
5240 * <10..2> 0 reserved, Must be 0s
5241 * <1> 0 1 = Rx DMA Interrupt Enabled
5242 * <0> 0 1 = Tx DMA Interrupt Enabled
5243 *
5244 * 1001 0000 0000 0000 = 0x9000
5245 */
5246
5247 usc_OutDmaReg( info, DICR, 0x9000 );
5248
5249 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5250 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5251 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5252
5253 /* Channel Control Register (CCR)
5254 *
5255 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5256 * <13> 0 Trigger Tx on SW Command Disabled
5257 * <12> 0 Flag Preamble Disabled
5258 * <11..10> 00 Preamble Length
5259 * <9..8> 00 Preamble Pattern
5260 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5261 * <5> 0 Trigger Rx on SW Command Disabled
5262 * <4..0> 0 reserved
5263 *
5264 * 1000 0000 1000 0000 = 0x8080
5265 */
5266
5267 RegValue = 0x8080;
5268
5269 switch ( info->params.preamble_length ) {
5270 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5271 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5272 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5273 }
5274
5275 switch ( info->params.preamble ) {
5276 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5277 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5278 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5279 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5280 }
5281
5282 usc_OutReg( info, CCR, RegValue );
5283
5284
5285 /*
5286 * Burst/Dwell Control Register
5287 *
5288 * <15..8> 0x20 Maximum number of transfers per bus grant
5289 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5290 */
5291
5292 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5293 /* don't limit bus occupancy on PCI adapter */
5294 usc_OutDmaReg( info, BDCR, 0x0000 );
5295 }
5296 else
5297 usc_OutDmaReg( info, BDCR, 0x2000 );
5298
5299 usc_stop_transmitter(info);
5300 usc_stop_receiver(info);
5301
5302} /* end of usc_set_sdlc_mode() */
5303
5304/* usc_enable_loopback()
5305 *
5306 * Set the 16C32 for internal loopback mode.
5307 * The TxCLK and RxCLK signals are generated from the BRG0 and
5308 * the TxD is looped back to the RxD internally.
5309 *
5310 * Arguments: info pointer to device instance data
5311 * enable 1 = enable loopback, 0 = disable
5312 * Return Value: None
5313 */
5314static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5315{
5316 if (enable) {
5317 /* blank external TXD output */
5318 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5319
5320 /* Clock mode Control Register (CMCR)
5321 *
5322 * <15..14> 00 counter 1 Disabled
5323 * <13..12> 00 counter 0 Disabled
5324 * <11..10> 11 BRG1 Input is TxC Pin
5325 * <9..8> 11 BRG0 Input is TxC Pin
5326 * <7..6> 01 DPLL Input is BRG1 Output
5327 * <5..3> 100 TxCLK comes from BRG0
5328 * <2..0> 100 RxCLK comes from BRG0
5329 *
5330 * 0000 1111 0110 0100 = 0x0f64
5331 */
5332
5333 usc_OutReg( info, CMCR, 0x0f64 );
5334
5335 /* Write 16-bit Time Constant for BRG0 */
5336 /* use clock speed if available, otherwise use 8 for diagnostics */
5337 if (info->params.clock_speed) {
5338 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5339 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5340 else
5341 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5342 } else
5343 usc_OutReg(info, TC0R, (u16)8);
5344
5345 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5346 mode = Continuous Set Bit 0 to enable BRG0. */
5347 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5348
5349 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5350 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5351
5352 /* set Internal Data loopback mode */
5353 info->loopback_bits = 0x300;
5354 outw( 0x0300, info->io_base + CCAR );
5355 } else {
5356 /* enable external TXD output */
5357 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5358
5359 /* clear Internal Data loopback mode */
5360 info->loopback_bits = 0;
5361 outw( 0,info->io_base + CCAR );
5362 }
5363
5364} /* end of usc_enable_loopback() */
5365
5366/* usc_enable_aux_clock()
5367 *
5368 * Enable the AUX clock output at the specified frequency.
5369 *
5370 * Arguments:
5371 *
5372 * info pointer to device extension
5373 * data_rate data rate of clock in bits per second
5374 * A data rate of 0 disables the AUX clock.
5375 *
5376 * Return Value: None
5377 */
5378static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5379{
5380 u32 XtalSpeed;
5381 u16 Tc;
5382
5383 if ( data_rate ) {
5384 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5385 XtalSpeed = 11059200;
5386 else
5387 XtalSpeed = 14745600;
5388
5389
5390 /* Tc = (Xtal/Speed) - 1 */
5391 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5392 /* then rounding up gives a more precise time constant. Instead */
5393 /* of rounding up and then subtracting 1 we just don't subtract */
5394 /* the one in this case. */
5395
5396
5397 Tc = (u16)(XtalSpeed/data_rate);
5398 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5399 Tc--;
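	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * with the ISA crystal (14745600 Hz) and data_rate = 11000 bps,
	 * 14745600 / 11000 = 1340 remainder 5600. Twice the remainder (11200)
	 * exceeds the data rate, so Tc is left at 1340, which equals the
	 * rounded quotient (1341) minus one. For an exact divide such as
	 * 38400 bps (quotient 384, remainder 0), Tc is decremented to 383.
	 */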
5400
5401 /* Write 16-bit Time Constant for BRG0 */
5402 usc_OutReg( info, TC0R, Tc );
5403
5404 /*
5405 * Hardware Configuration Register (HCR)
5406 * Clear Bit 1, BRG0 mode = Continuous
5407 * Set Bit 0 to enable BRG0.
5408 */
5409
5410 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5411
5412 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5413 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5414 } else {
5415 /* data rate == 0 so turn off BRG0 */
5416 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5417 }
5418
5419} /* end of usc_enable_aux_clock() */
5420
5421/*
5422 *
5423 * usc_process_rxoverrun_sync()
5424 *
5425 * This function processes a receive overrun by resetting the
5426 * receive DMA buffers and issuing a Purge Rx FIFO command
5427 * to allow the receiver to continue receiving.
5428 *
5429 * Arguments:
5430 *
5431 * info pointer to device extension
5432 *
5433 * Return Value: None
5434 */
5435static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5436{
5437 int start_index;
5438 int end_index;
5439 int frame_start_index;
5440 int start_of_frame_found = FALSE;
5441 int end_of_frame_found = FALSE;
5442 int reprogram_dma = FALSE;
5443
5444 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5445 u32 phys_addr;
5446
5447 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5448 usc_RCmd( info, RCmd_EnterHuntmode );
5449 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5450
5451 /* CurrentRxBuffer points to the 1st buffer of the next */
5452 /* possibly available receive frame. */
5453
5454 frame_start_index = start_index = end_index = info->current_rx_buffer;
5455
5456 /* Search for an unfinished string of buffers. This means */
5457 /* that a receive frame started (at least one buffer with */
5458	/* count set to zero) but there is no terminating buffer */
5459 /* (status set to non-zero). */
5460
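	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * assuming current_rx_buffer = 0, rx_buffer_count = 4 and a ring state
	 * of
	 *
	 *	index:   0        1        2       3
	 *	count:   0        0        4096    4096
	 *	status:  nonzero  0        0       0
	 *
	 * buffer 0 ends a completed frame while buffer 1 starts an unfinished
	 * string (count cleared by the 16C32 but no terminating status). The
	 * loop below leaves frame_start_index at 1, and the cleanup that
	 * follows restores that buffer's count and restarts receive DMA there.
	 */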
5461 while( !buffer_list[end_index].count )
5462 {
5463 /* Count field has been reset to zero by 16C32. */
5464 /* This buffer is currently in use. */
5465
5466 if ( !start_of_frame_found )
5467 {
5468 start_of_frame_found = TRUE;
5469 frame_start_index = end_index;
5470 end_of_frame_found = FALSE;
5471 }
5472
5473 if ( buffer_list[end_index].status )
5474 {
5475 /* Status field has been set by 16C32. */
5476 /* This is the last buffer of a received frame. */
5477
5478 /* We want to leave the buffers for this frame intact. */
5479 /* Move on to next possible frame. */
5480
5481 start_of_frame_found = FALSE;
5482 end_of_frame_found = TRUE;
5483 }
5484
5485 /* advance to next buffer entry in linked list */
5486 end_index++;
5487 if ( end_index == info->rx_buffer_count )
5488 end_index = 0;
5489
5490 if ( start_index == end_index )
5491 {
5492 /* The entire list has been searched with all Counts == 0 and */
5493 /* all Status == 0. The receive buffers are */
5494 /* completely screwed, reset all receive buffers! */
5495 mgsl_reset_rx_dma_buffers( info );
5496 frame_start_index = 0;
5497 start_of_frame_found = FALSE;
5498 reprogram_dma = TRUE;
5499 break;
5500 }
5501 }
5502
5503 if ( start_of_frame_found && !end_of_frame_found )
5504 {
5505 /* There is an unfinished string of receive DMA buffers */
5506 /* as a result of the receiver overrun. */
5507
5508 /* Reset the buffers for the unfinished frame */
5509 /* and reprogram the receive DMA controller to start */
5510 /* at the 1st buffer of unfinished frame. */
5511
5512 start_index = frame_start_index;
5513
5514 do
5515 {
5516 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5517
5518 /* Adjust index for wrap around. */
5519 if ( start_index == info->rx_buffer_count )
5520 start_index = 0;
5521
5522 } while( start_index != end_index );
5523
5524 reprogram_dma = TRUE;
5525 }
5526
5527 if ( reprogram_dma )
5528 {
5529 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5530 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5531 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5532
5533 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5534
5535 /* This empties the receive FIFO and loads the RCC with RCLR */
5536 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5537
5538 /* program 16C32 with physical address of 1st DMA buffer entry */
5539 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5540 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5541 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5542
5543 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5544 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5545 usc_EnableInterrupts( info, RECEIVE_STATUS );
5546
5547 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5548 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5549
5550 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5551 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5552 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5553 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5554 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5555 else
5556 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5557 }
5558 else
5559 {
5560 /* This empties the receive FIFO and loads the RCC with RCLR */
5561 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5562 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5563 }
5564
5565} /* end of usc_process_rxoverrun_sync() */
5566
5567/* usc_stop_receiver()
5568 *
5569 * Disable USC receiver
5570 *
5571 * Arguments: info pointer to device instance data
5572 * Return Value: None
5573 */
5574static void usc_stop_receiver( struct mgsl_struct *info )
5575{
5576 if (debug_level >= DEBUG_LEVEL_ISR)
5577 printk("%s(%d):usc_stop_receiver(%s)\n",
5578 __FILE__,__LINE__, info->device_name );
5579
5580 /* Disable receive DMA channel. */
5581 /* This also disables receive DMA channel interrupts */
5582 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5583
5584 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5585 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5586 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5587
5588 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5589
5590 /* This empties the receive FIFO and loads the RCC with RCLR */
5591 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5592 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5593
5594 info->rx_enabled = 0;
5595 info->rx_overflow = 0;
5596 info->rx_rcc_underrun = 0;
5597
5598} /* end of usc_stop_receiver() */
5599
5600/* usc_start_receiver()
5601 *
5602 * Enable the USC receiver
5603 *
5604 * Arguments: info pointer to device instance data
5605 * Return Value: None
5606 */
5607static void usc_start_receiver( struct mgsl_struct *info )
5608{
5609 u32 phys_addr;
5610
5611 if (debug_level >= DEBUG_LEVEL_ISR)
5612 printk("%s(%d):usc_start_receiver(%s)\n",
5613 __FILE__,__LINE__, info->device_name );
5614
5615 mgsl_reset_rx_dma_buffers( info );
5616 usc_stop_receiver( info );
5617
5618 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5619 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5620
5621 if ( info->params.mode == MGSL_MODE_HDLC ||
5622 info->params.mode == MGSL_MODE_RAW ) {
5623 /* DMA mode Transfers */
5624 /* Program the DMA controller. */
5625 /* Enable the DMA controller end of buffer interrupt. */
5626
5627 /* program 16C32 with physical address of 1st DMA buffer entry */
5628 phys_addr = info->rx_buffer_list[0].phys_entry;
5629 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5630 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5631
5632 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5633 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5634 usc_EnableInterrupts( info, RECEIVE_STATUS );
5635
5636 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5637 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5638
5639 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5640 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5641 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5642 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5643 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5644 else
5645 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5646 } else {
5647 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5648 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5649 usc_EnableInterrupts(info, RECEIVE_DATA);
5650
5651 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5652 usc_RCmd( info, RCmd_EnterHuntmode );
5653
5654 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5655 }
5656
5657 usc_OutReg( info, CCSR, 0x1020 );
5658
5659 info->rx_enabled = 1;
5660
5661} /* end of usc_start_receiver() */
5662
5663/* usc_start_transmitter()
5664 *
5665 * Enable the USC transmitter and send a transmit frame if
5666 * one is loaded in the DMA buffers.
5667 *
5668 * Arguments: info pointer to device instance data
5669 * Return Value: None
5670 */
5671static void usc_start_transmitter( struct mgsl_struct *info )
5672{
5673 u32 phys_addr;
5674 unsigned int FrameSize;
5675
5676 if (debug_level >= DEBUG_LEVEL_ISR)
5677 printk("%s(%d):usc_start_transmitter(%s)\n",
5678 __FILE__,__LINE__, info->device_name );
5679
5680 if ( info->xmit_cnt ) {
5681
5682 /* If auto RTS enabled and RTS is inactive, then assert */
5683 /* RTS and set a flag indicating that the driver should */
5684 /* negate RTS when the transmission completes. */
5685
5686 info->drop_rts_on_tx_done = 0;
5687
5688 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5689 usc_get_serial_signals( info );
5690 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5691 info->serial_signals |= SerialSignal_RTS;
5692 usc_set_serial_signals( info );
5693 info->drop_rts_on_tx_done = 1;
5694 }
5695 }
5696
5697
5698 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5699 if ( !info->tx_active ) {
5700 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5701 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5702 usc_EnableInterrupts(info, TRANSMIT_DATA);
5703 usc_load_txfifo(info);
5704 }
5705 } else {
5706 /* Disable transmit DMA controller while programming. */
5707 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5708
5709 /* Transmit DMA buffer is loaded, so program USC */
5710 /* to send the frame contained in the buffers. */
5711
5712 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5713
5714 /* if operating in Raw sync mode, reset the rcc component
5715 * of the tx dma buffer entry, otherwise, the serial controller
5716 * will send a closing sync char after this count.
5717 */
5718 if ( info->params.mode == MGSL_MODE_RAW )
5719 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5720
5721 /* Program the Transmit Character Length Register (TCLR) */
5722 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5723 usc_OutReg( info, TCLR, (u16)FrameSize );
5724
5725 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5726
5727 /* Program the address of the 1st DMA Buffer Entry in linked list */
5728 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5729 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5730 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5731
5732 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5733 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5734 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5735
5736 if ( info->params.mode == MGSL_MODE_RAW &&
5737 info->num_tx_dma_buffers > 1 ) {
5738 /* When running external sync mode, attempt to 'stream' transmit */
5739 /* by filling tx dma buffers as they become available. To do this */
5740 /* we need to enable Tx DMA EOB Status interrupts : */
5741 /* */
5742 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5743 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5744
5745 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5746 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5747 }
5748
5749 /* Initialize Transmit DMA Channel */
5750 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5751
5752 usc_TCmd( info, TCmd_SendFrame );
5753
5754 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5755 add_timer(&info->tx_timer);
5756 }
5757 info->tx_active = 1;
5758 }
5759
5760 if ( !info->tx_enabled ) {
5761 info->tx_enabled = 1;
5762 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5763 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5764 else
5765 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5766 }
5767
5768} /* end of usc_start_transmitter() */
5769
5770/* usc_stop_transmitter()
5771 *
5772 * Stops the transmitter and DMA
5773 *
5774 * Arguments:		info	pointer to device instance data
5775 * Return Value: None
5776 */
5777static void usc_stop_transmitter( struct mgsl_struct *info )
5778{
5779 if (debug_level >= DEBUG_LEVEL_ISR)
5780 printk("%s(%d):usc_stop_transmitter(%s)\n",
5781 __FILE__,__LINE__, info->device_name );
5782
5783 del_timer(&info->tx_timer);
5784
5785 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5786 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5787 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5788
5789 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5790 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5791 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5792
5793 info->tx_enabled = 0;
5794 info->tx_active = 0;
5795
5796} /* end of usc_stop_transmitter() */
5797
5798/* usc_load_txfifo()
5799 *
5800 * Fill the transmit FIFO until the FIFO is full or
5801 * there is no more data to load.
5802 *
5803 * Arguments: info pointer to device extension (instance data)
5804 * Return Value: None
5805 */
5806static void usc_load_txfifo( struct mgsl_struct *info )
5807{
5808 int Fifocount;
5809 u8 TwoBytes[2];
5810
5811 if ( !info->xmit_cnt && !info->x_char )
5812 return;
5813
5814 /* Select transmit FIFO status readback in TICR */
5815 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5816
5817 /* load the Transmit FIFO until FIFOs full or all data sent */
5818
5819 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5820 /* there is more space in the transmit FIFO and */
5821 /* there is more data in transmit buffer */
5822
5823 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5824 /* write a 16-bit word from transmit buffer to 16C32 */
5825
5826 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5827 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5828 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5829 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5830
5831 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5832
5833 info->xmit_cnt -= 2;
5834 info->icount.tx += 2;
5835 } else {
5836 /* only 1 byte left to transmit or 1 FIFO slot left */
5837
5838 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5839 info->io_base + CCAR );
5840
5841 if (info->x_char) {
5842 /* transmit pending high priority char */
5843 outw( info->x_char,info->io_base + CCAR );
5844 info->x_char = 0;
5845 } else {
5846 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5847 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5848 info->xmit_cnt--;
5849 }
5850 info->icount.tx++;
5851 }
5852 }
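	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * with xmit_cnt = 5, no pending x_char and ample FIFO space, the loop
	 * above performs two 16-bit writes to DATAREG (4 bytes) and then one
	 * 8-bit write through the TDR+LSBONLY path for the final byte.
	 */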
5853
5854} /* end of usc_load_txfifo() */
5855
5856/* usc_reset()
5857 *
5858 * Reset the adapter to a known state and prepare it for further use.
5859 *
5860 * Arguments: info pointer to device instance data
5861 * Return Value: None
5862 */
5863static void usc_reset( struct mgsl_struct *info )
5864{
5865 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5866 int i;
5867 u32 readval;
5868
5869 /* Set BIT30 of Misc Control Register */
5870 /* (Local Control Register 0x50) to force reset of USC. */
5871
5872 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5873 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5874
5875 info->misc_ctrl_value |= BIT30;
5876 *MiscCtrl = info->misc_ctrl_value;
5877
5878 /*
5879 * Force at least 170ns delay before clearing
5880 * reset bit. Each read from LCR takes at least
5881 * 30ns so 10 times for 300ns to be safe.
5882 */
5883 for(i=0;i<10;i++)
5884 readval = *MiscCtrl;
5885
5886 info->misc_ctrl_value &= ~BIT30;
5887 *MiscCtrl = info->misc_ctrl_value;
5888
5889 *LCR0BRDR = BUS_DESCRIPTOR(
5890 1, // Write Strobe Hold (0-3)
5891 2, // Write Strobe Delay (0-3)
5892 2, // Read Strobe Delay (0-3)
5893 0, // NWDD (Write data-data) (0-3)
5894 4, // NWAD (Write Addr-data) (0-31)
5895 0, // NXDA (Read/Write Data-Addr) (0-3)
5896 0, // NRDD (Read Data-Data) (0-3)
5897 5 // NRAD (Read Addr-Data) (0-31)
5898 );
5899 } else {
5900 /* do HW reset */
5901 outb( 0,info->io_base + 8 );
5902 }
5903
5904 info->mbre_bit = 0;
5905 info->loopback_bits = 0;
5906 info->usc_idle_mode = 0;
5907
5908 /*
5909 * Program the Bus Configuration Register (BCR)
5910 *
5911 * <15> 0 Don't use separate address
5912 * <14..6> 0 reserved
5913 * <5..4> 00 IAckmode = Default, don't care
5914 * <3> 1 Bus Request Totem Pole output
5915 * <2> 1 Use 16 Bit data bus
5916 * <1> 0 IRQ Totem Pole output
5917 * <0> 0 Don't Shift Right Addr
5918 *
5919 * 0000 0000 0000 1100 = 0x000c
5920 *
5921 * By writing to io_base + SDPIN the Wait/Ack pin is
5922 * programmed to work as a Wait pin.
5923 */
5924
5925 outw( 0x000c,info->io_base + SDPIN );
5926
5927
5928 outw( 0,info->io_base );
5929 outw( 0,info->io_base + CCAR );
5930
5931 /* select little endian byte ordering */
5932 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5933
5934
5935 /* Port Control Register (PCR)
5936 *
5937 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5938 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5939 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5940 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5941 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5942 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5943 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5944 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5945 *
5946 * 1111 0000 1111 0101 = 0xf0f5
5947 */
5948
5949 usc_OutReg( info, PCR, 0xf0f5 );
5950
5951
5952 /*
5953 * Input/Output Control Register
5954 *
5955 * <15..14> 00 CTS is active low input
5956 * <13..12> 00 DCD is active low input
5957 * <11..10> 00 TxREQ pin is input (DSR)
5958 * <9..8> 00 RxREQ pin is input (RI)
5959 * <7..6> 00 TxD is output (Transmit Data)
5960 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5961 * <2..0> 100 RxC is Output (drive with BRG0)
5962 *
5963 * 0000 0000 0000 0100 = 0x0004
5964 */
5965
5966 usc_OutReg( info, IOCR, 0x0004 );
5967
5968} /* end of usc_reset() */
5969
5970/* usc_set_async_mode()
5971 *
5972 * Program adapter for asynchronous communications.
5973 *
5974 * Arguments: info pointer to device instance data
5975 * Return Value: None
5976 */
5977static void usc_set_async_mode( struct mgsl_struct *info )
5978{
5979 u16 RegValue;
5980
5981 /* disable interrupts while programming USC */
5982 usc_DisableMasterIrqBit( info );
5983
5984 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5985 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5986
5987 usc_loopback_frame( info );
5988
5989 /* Channel mode Register (CMR)
5990 *
5991 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5992 * <13..12> 00 00 = 16X Clock
5993 * <11..8> 0000 Transmitter mode = Asynchronous
5994 * <7..6> 00 reserved?
5995 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5996 * <3..0> 0000 Receiver mode = Asynchronous
5997 *
5998 * 0000 0000 0000 0000 = 0x0
5999 */
6000
6001 RegValue = 0;
6002 if ( info->params.stop_bits != 1 )
6003 RegValue |= BIT14;
6004 usc_OutReg( info, CMR, RegValue );
6005
6006
6007 /* Receiver mode Register (RMR)
6008 *
6009 * <15..13> 000 encoding = None
6010 * <12..08> 00000 reserved (Sync Only)
6011 * <7..6> 00 Even parity
6012 * <5> 0 parity disabled
6013 * <4..2> 000 Receive Char Length = 8 bits
6014 * <1..0> 00 Disable Receiver
6015 *
6016 * 0000 0000 0000 0000 = 0x0
6017 */
6018
6019 RegValue = 0;
6020
6021 if ( info->params.data_bits != 8 )
6022 RegValue |= BIT4+BIT3+BIT2;
6023
6024 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6025 RegValue |= BIT5;
6026 if ( info->params.parity != ASYNC_PARITY_ODD )
6027 RegValue |= BIT6;
6028 }
6029
6030 usc_OutReg( info, RMR, RegValue );
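	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * for 7 data bits with even parity the tests above produce
	 * RegValue = BIT4+BIT3+BIT2 (non-8-bit char length) plus BIT5 and BIT6
	 * (parity enabled, even), i.e. 0x007c; for 8 data bits with no parity
	 * RegValue stays 0.
	 */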
6031
6032
6033 /* Set IRQ trigger level */
6034
6035 usc_RCmd( info, RCmd_SelectRicrIntLevel );
6036
6037
6038 /* Receive Interrupt Control Register (RICR)
6039 *
6040 * <15..8> ? RxFIFO IRQ Request Level
6041 *
6042 * Note: For async mode the receive FIFO level must be set
6043 * to 0 to avoid the situation where the FIFO contains fewer bytes
6044 * than the trigger level and no more data is expected.
6045 *
6046 * <7> 0 Exited Hunt IA (Interrupt Arm)
6047 * <6> 0 Idle Received IA
6048 * <5> 0 Break/Abort IA
6049 * <4> 0 Rx Bound IA
6050 * <3> 0 Queued status reflects oldest byte in FIFO
6051 * <2> 0 Abort/PE IA
6052 * <1> 0 Rx Overrun IA
6053 * <0> 0 Select TC0 value for readback
6054 *
6055 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6056 */
6057
6058 usc_OutReg( info, RICR, 0x0000 );
6059
6060 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6061 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6062
6063
6064 /* Transmit mode Register (TMR)
6065 *
6066 * <15..13> 000 encoding = None
6067 * <12..08> 00000 reserved (Sync Only)
6068 * <7..6> 00 Transmit parity Even
6069 * <5> 0 Transmit parity Disabled
6070 * <4..2> 000 Tx Char Length = 8 bits
6071 * <1..0> 00 Disable Transmitter
6072 *
6073 * 0000 0000 0000 0000 = 0x0
6074 */
6075
6076 RegValue = 0;
6077
6078 if ( info->params.data_bits != 8 )
6079 RegValue |= BIT4+BIT3+BIT2;
6080
6081 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6082 RegValue |= BIT5;
6083 if ( info->params.parity != ASYNC_PARITY_ODD )
6084 RegValue |= BIT6;
6085 }
6086
6087 usc_OutReg( info, TMR, RegValue );
6088
6089 usc_set_txidle( info );
6090
6091
6092 /* Set IRQ trigger level */
6093
6094 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6095
6096
6097 /* Transmit Interrupt Control Register (TICR)
6098 *
6099 * <15..8> ? Transmit FIFO IRQ Level
6100 * <7> 0 Present IA (Interrupt Arm)
6101 * <6> 1 Idle Sent IA
6102 * <5> 0 Abort Sent IA
6103 * <4> 0 EOF/EOM Sent IA
6104 * <3> 0 CRC Sent IA
6105 * <2> 0 1 = Wait for SW Trigger to Start Frame
6106 * <1> 0 Tx Underrun IA
6107 * <0> 0 TC0 constant on read back
6108 *
6109 *	0001 1111 0100 0000 = 0x1f40 (TxFIFO IRQ level = 0x1f)
6110 */
6111
6112 usc_OutReg( info, TICR, 0x1f40 );
6113
6114 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6115 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6116
6117 usc_enable_async_clock( info, info->params.data_rate );
6118
6119
6120 /* Channel Control/status Register (CCSR)
6121 *
6122 * <15> X RCC FIFO Overflow status (RO)
6123 * <14> X RCC FIFO Not Empty status (RO)
6124 * <13> 0 1 = Clear RCC FIFO (WO)
6125 * <12> X DPLL in Sync status (RO)
6126 * <11> X DPLL 2 Missed Clocks status (RO)
6127 * <10> X DPLL 1 Missed Clock status (RO)
6128 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6129 * <7> X SDLC Loop On status (RO)
6130 * <6> X SDLC Loop Send status (RO)
6131 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6132 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6133 * <1..0> 00 reserved
6134 *
6135 * 0000 0000 0010 0000 = 0x0020
6136 */
6137
6138 usc_OutReg( info, CCSR, 0x0020 );
6139
6140 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6141 RECEIVE_DATA + RECEIVE_STATUS );
6142
6143 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6144 RECEIVE_DATA + RECEIVE_STATUS );
6145
6146 usc_EnableMasterIrqBit( info );
6147
6148 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6149 /* Enable INTEN (Port 6, Bit12) */
6150 /* This connects the IRQ request signal to the ISA bus */
6151 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6152 }
6153
6154	if (info->params.loopback) {
6155 info->loopback_bits = 0x300;
6156 outw(0x0300, info->io_base + CCAR);
6157 }
6158
6159} /* end of usc_set_async_mode() */
6160
6161/* usc_loopback_frame()
6162 *
6163 * Loop back a small (2 byte) dummy SDLC frame.
6164 * Interrupts and DMA are NOT used. The purpose of this is to
6165 * clear any 'stale' status info left over from running in async mode.
6166 *
6167 * The 16C32 shows the strange behaviour of marking the 1st
6168 * received SDLC frame with a CRC error even when there is no
6169 * CRC error. To get around this, a small dummy frame of 2 bytes
6170 * is looped back when switching from async to sync mode.
6171 *
6172 * Arguments: info pointer to device instance data
6173 * Return Value: None
6174 */
6175static void usc_loopback_frame( struct mgsl_struct *info )
6176{
6177 int i;
6178 unsigned long oldmode = info->params.mode;
6179
6180 info->params.mode = MGSL_MODE_HDLC;
6181
6182 usc_DisableMasterIrqBit( info );
6183
6184 usc_set_sdlc_mode( info );
6185 usc_enable_loopback( info, 1 );
6186
6187 /* Write 16-bit Time Constant for BRG0 */
6188 usc_OutReg( info, TC0R, 0 );
6189
6190 /* Channel Control Register (CCR)
6191 *
6192 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6193 * <13> 0 Trigger Tx on SW Command Disabled
6194 * <12> 0 Flag Preamble Disabled
6195 * <11..10> 00 Preamble Length = 8-Bits
6196 * <9..8> 01 Preamble Pattern = flags
6197 *	<7..6>		00	Don't use 32-bit Rx status Blocks (RSBs)
6198 * <5> 0 Trigger Rx on SW Command Disabled
6199 * <4..0> 0 reserved
6200 *
6201 * 0000 0001 0000 0000 = 0x0100
6202 */
6203
6204 usc_OutReg( info, CCR, 0x0100 );
6205
6206 /* SETUP RECEIVER */
6207 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6208 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6209
6210 /* SETUP TRANSMITTER */
6211 /* Program the Transmit Character Length Register (TCLR) */
6212 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6213 usc_OutReg( info, TCLR, 2 );
6214 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6215
6216 /* unlatch Tx status bits, and start transmit channel. */
6217 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6218 outw(0,info->io_base + DATAREG);
6219
6220 /* ENABLE TRANSMITTER */
6221 usc_TCmd( info, TCmd_SendFrame );
6222 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6223
6224 /* WAIT FOR RECEIVE COMPLETE */
6225 for (i=0 ; i<1000 ; i++)
6226 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6227 break;
6228
6229 /* clear Internal Data loopback mode */
6230 usc_enable_loopback(info, 0);
6231
6232 usc_EnableMasterIrqBit(info);
6233
6234 info->params.mode = oldmode;
6235
6236} /* end of usc_loopback_frame() */
6237
6238/* usc_set_sync_mode() Programs the USC for SDLC communications.
6239 *
6240 * Arguments: info pointer to adapter info structure
6241 * Return Value: None
6242 */
6243static void usc_set_sync_mode( struct mgsl_struct *info )
6244{
6245 usc_loopback_frame( info );
6246 usc_set_sdlc_mode( info );
6247
6248 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6249 /* Enable INTEN (Port 6, Bit12) */
6250 /* This connects the IRQ request signal to the ISA bus */
6251 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6252 }
6253
6254 usc_enable_aux_clock(info, info->params.clock_speed);
6255
6256 if (info->params.loopback)
6257 usc_enable_loopback(info,1);
6258
6259} /* end of usc_set_sync_mode() */
6260
6261/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6262 *
6263 * Arguments: info pointer to device instance data
6264 * Return Value: None
6265 */
6266static void usc_set_txidle( struct mgsl_struct *info )
6267{
6268 u16 usc_idle_mode = IDLEMODE_FLAGS;
6269
6270 /* Map API idle mode to USC register bits */
6271
6272 switch( info->idle_mode ){
6273 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6274 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6275 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6276 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6277 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6278 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6279 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6280 }
6281
6282 info->usc_idle_mode = usc_idle_mode;
6283 //usc_OutReg(info, TCSR, usc_idle_mode);
6284 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6285 info->tcsr_value += usc_idle_mode;
6286 usc_OutReg(info, TCSR, info->tcsr_value);
6287
6288 /*
6289 * if SyncLink WAN adapter is running in external sync mode, the
6290 * transmitter has been set to Monosync in order to try to mimic
6291 * a true raw outbound bit stream. Monosync still sends an open/close
6292 * sync char at the start/end of a frame. Try to match those sync
6293 * patterns to the idle mode set here
6294 */
6295 if ( info->params.mode == MGSL_MODE_RAW ) {
6296 unsigned char syncpat = 0;
6297 switch( info->idle_mode ) {
6298 case HDLC_TXIDLE_FLAGS:
6299 syncpat = 0x7e;
6300 break;
6301 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6302 syncpat = 0x55;
6303 break;
6304 case HDLC_TXIDLE_ZEROS:
6305 case HDLC_TXIDLE_SPACE:
6306 syncpat = 0x00;
6307 break;
6308 case HDLC_TXIDLE_ONES:
6309 case HDLC_TXIDLE_MARK:
6310 syncpat = 0xff;
6311 break;
6312 case HDLC_TXIDLE_ALT_MARK_SPACE:
6313 syncpat = 0xaa;
6314 break;
6315 }
6316
6317 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6318 }
6319
6320} /* end of usc_set_txidle() */
6321
6322/* usc_get_serial_signals()
6323 *
6324 * Query the adapter for the state of the V24 status (input) signals.
6325 *
6326 * Arguments: info pointer to device instance data
6327 * Return Value: None
6328 */
6329static void usc_get_serial_signals( struct mgsl_struct *info )
6330{
6331 u16 status;
6332
6333 /* clear all serial signals except DTR and RTS */
6334 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6335
6336 /* Read the Misc Interrupt status Register (MISR) to get */
6337 /* the V24 status signals. */
6338
6339 status = usc_InReg( info, MISR );
6340
6341 /* set serial signal bits to reflect MISR */
6342
6343 if ( status & MISCSTATUS_CTS )
6344 info->serial_signals |= SerialSignal_CTS;
6345
6346 if ( status & MISCSTATUS_DCD )
6347 info->serial_signals |= SerialSignal_DCD;
6348
6349 if ( status & MISCSTATUS_RI )
6350 info->serial_signals |= SerialSignal_RI;
6351
6352 if ( status & MISCSTATUS_DSR )
6353 info->serial_signals |= SerialSignal_DSR;
6354
6355} /* end of usc_get_serial_signals() */
6356
6357/* usc_set_serial_signals()
6358 *
6359 * Set the state of DTR and RTS based on contents of
6360 * serial_signals member of device extension.
6361 *
6362 * Arguments: info pointer to device instance data
6363 * Return Value: None
6364 */
6365static void usc_set_serial_signals( struct mgsl_struct *info )
6366{
6367 u16 Control;
6368 unsigned char V24Out = info->serial_signals;
6369
6370 /* get the current value of the Port Control Register (PCR) */
6371
6372 Control = usc_InReg( info, PCR );
6373
6374 if ( V24Out & SerialSignal_RTS )
6375 Control &= ~(BIT6);
6376 else
6377 Control |= BIT6;
6378
6379 if ( V24Out & SerialSignal_DTR )
6380 Control &= ~(BIT4);
6381 else
6382 Control |= BIT4;
6383
6384 usc_OutReg( info, PCR, Control );
6385
6386} /* end of usc_set_serial_signals() */
6387
6388/* usc_enable_async_clock()
6389 *
6390 * Enable the async clock at the specified frequency.
6391 *
6392 * Arguments: info pointer to device instance data
6393 * data_rate data rate of clock in bps
6394 * 0 disables the AUX clock.
6395 * Return Value: None
6396 */
6397static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6398{
6399 if ( data_rate ) {
6400 /*
6401 * Clock mode Control Register (CMCR)
6402 *
6403 * <15..14> 00 counter 1 Disabled
6404 * <13..12> 00 counter 0 Disabled
6405 * <11..10> 11 BRG1 Input is TxC Pin
6406 * <9..8> 11 BRG0 Input is TxC Pin
6407 * <7..6> 01 DPLL Input is BRG1 Output
6408 * <5..3> 100 TxCLK comes from BRG0
6409 * <2..0> 100 RxCLK comes from BRG0
6410 *
6411 * 0000 1111 0110 0100 = 0x0f64
6412 */
6413
6414 usc_OutReg( info, CMCR, 0x0f64 );
6415
6416
6417 /*
6418 * Write 16-bit Time Constant for BRG0
6419 * Time Constant = (ClkSpeed / data_rate) - 1
6420 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6421 */
6422
6423 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6424 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6425 else
6426 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
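		/*
		 * Illustrative note (editor's example, not part of the original
		 * code): at 9600 bps the time constant is 921600/9600 - 1 = 95
		 * on an ISA adapter, or 691200/9600 - 1 = 71 on a PCI adapter.
		 */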
6427
6428
6429 /*
6430 * Hardware Configuration Register (HCR)
6431 * Clear Bit 1, BRG0 mode = Continuous
6432 * Set Bit 0 to enable BRG0.
6433 */
6434
6435 usc_OutReg( info, HCR,
6436 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6437
6438
6439 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6440
6441 usc_OutReg( info, IOCR,
6442 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6443 } else {
6444 /* data rate == 0 so turn off BRG0 */
6445 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6446 }
6447
6448} /* end of usc_enable_async_clock() */
6449
6450/*
6451 * Buffer Structures:
6452 *
6453 * Normal memory access uses virtual addresses that can make discontiguous
6454 * physical memory pages appear to be contiguous in the virtual address
6455 * space (the processors memory mapping handles the conversions).
6456 * space (the processor's memory mapping handles the conversions).
6457 * DMA transfers require physically contiguous memory. This is because
6458 * the DMA system controller and DMA bus masters deal with memory using
6459 * only physical addresses.
6460 *
6461 * This causes a problem under Windows NT when large DMA buffers are
6462 * needed. Fragmentation of the nonpaged pool prevents allocations of
6463 * physically contiguous buffers larger than the PAGE_SIZE.
6464 *
6465 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6466 * allows DMA transfers to physically discontiguous buffers. Information
6467 * about each data transfer buffer is contained in a memory structure
6468 * called a 'buffer entry'. A list of buffer entries is maintained
6469 * to track and control the use of the data transfer buffers.
6470 *
6471 * To support this strategy we will allocate sufficient PAGE_SIZE
6472 * contiguous memory buffers to allow for the total required buffer
6473 * space.
6474 *
6475 * The 16C32 accesses the list of buffer entries using Bus Master
6476 * DMA. Control information is read from the buffer entries by the
6477 * 16C32 to control data transfers. status information is written to
6478 * the buffer entries by the 16C32 to indicate the status of completed
6479 * transfers.
6480 *
6481 * The CPU writes control information to the buffer entries to control
6482 * the 16C32 and reads status information from the buffer entries to
6483 * determine information about received and transmitted frames.
6484 *
6485 * Because the CPU and 16C32 (adapter) both need simultaneous access
6486 * to the buffer entries, the buffer entry memory is allocated with
6487 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6488 * entry list to PAGE_SIZE.
6489 *
6490 * The actual data buffers on the other hand will only be accessed
6491 * by the CPU or the adapter but not by both simultaneously. This allows
6492 * Scatter/Gather packet based DMA procedures for using physically
6493 * discontiguous pages.
6494 */
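/*
 * Editor's sketch (illustrative only, not the driver's actual definition):
 * the DMABUFFERENTRY type used throughout this section is assumed to carry
 * at least the fields referenced below. The real layout is declared in the
 * driver's header and is shared with the 16C32, so this is only a summary
 * of how the fields are used here:
 *
 *	struct example_buffer_entry {
 *		volatile u16 count;    -- byte count; cleared by the 16C32 while filling
 *		volatile u16 status;   -- set by the 16C32 on the last buffer of a frame
 *		volatile u16 rcc;      -- residual character count for the completed frame
 *		u32 phys_entry;        -- physical address of this entry (loaded into NRARL/NRARU)
 *		char *virt_addr;       -- CPU virtual address of the data buffer
 *	};
 *
 * The *((unsigned long *)&(entry.count)) stores seen below appear to update
 * count and the adjacent status word in a single 32-bit write.
 */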
6495
6496/*
6497 * mgsl_reset_tx_dma_buffers()
6498 *
6499 * Set the count for all transmit buffers to 0 to indicate the
6500 * buffer is available for use and set the current buffer to the
6501 * first buffer. This effectively makes all buffers free and
6502 * discards any data in buffers.
6503 *
6504 * Arguments: info pointer to device instance data
6505 * Return Value: None
6506 */
6507static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6508{
6509 unsigned int i;
6510
6511 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6512 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6513 }
6514
6515 info->current_tx_buffer = 0;
6516 info->start_tx_dma_buffer = 0;
6517 info->tx_dma_buffers_used = 0;
6518
6519 info->get_tx_holding_index = 0;
6520 info->put_tx_holding_index = 0;
6521 info->tx_holding_count = 0;
6522
6523} /* end of mgsl_reset_tx_dma_buffers() */
6524
6525/*
6526 * num_free_tx_dma_buffers()
6527 *
6528 * returns the number of free tx dma buffers available
6529 *
6530 * Arguments: info pointer to device instance data
6531 * Return Value: number of free tx dma buffers
6532 */
6533static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6534{
6535 return info->tx_buffer_count - info->tx_dma_buffers_used;
6536}
6537
6538/*
6539 * mgsl_reset_rx_dma_buffers()
6540 *
6541 * Set the count for all receive buffers to DMABUFFERSIZE
6542 * and set the current buffer to the first buffer. This effectively
6543 * makes all buffers free and discards any data in buffers.
6544 *
6545 * Arguments: info pointer to device instance data
6546 * Return Value: None
6547 */
6548static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6549{
6550 unsigned int i;
6551
6552 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6553 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6554// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6555// info->rx_buffer_list[i].status = 0;
6556 }
6557
6558 info->current_rx_buffer = 0;
6559
6560} /* end of mgsl_reset_rx_dma_buffers() */
6561
6562/*
6563 * mgsl_free_rx_frame_buffers()
6564 *
6565 * Free the receive buffers used by a received SDLC
6566 * frame such that the buffers can be reused.
6567 *
6568 * Arguments:
6569 *
6570 * info pointer to device instance data
6571 * StartIndex index of 1st receive buffer of frame
6572 * EndIndex index of last receive buffer of frame
6573 *
6574 * Return Value: None
6575 */
6576static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6577{
6578 int Done = 0;
6579 DMABUFFERENTRY *pBufEntry;
6580 unsigned int Index;
6581
6582 /* Starting with 1st buffer entry of the frame clear the status */
6583 /* field and set the count field to DMA Buffer Size. */
6584
6585 Index = StartIndex;
6586
6587 while( !Done ) {
6588 pBufEntry = &(info->rx_buffer_list[Index]);
6589
6590 if ( Index == EndIndex ) {
6591 /* This is the last buffer of the frame! */
6592 Done = 1;
6593 }
6594
6595 /* reset current buffer for reuse */
6596// pBufEntry->status = 0;
6597// pBufEntry->count = DMABUFFERSIZE;
6598 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6599
6600 /* advance to next buffer entry in linked list */
6601 Index++;
6602 if ( Index == info->rx_buffer_count )
6603 Index = 0;
6604 }
6605
6606 /* set current buffer to next buffer after last buffer of frame */
6607 info->current_rx_buffer = Index;
6608
6609} /* end of mgsl_free_rx_frame_buffers() */
6610
6611/* mgsl_get_rx_frame()
6612 *
6613 * This function attempts to return a received SDLC frame from the
6614 * receive DMA buffers. Only frames received without errors are returned.
6615 *
6616 * Arguments: info pointer to device extension
6617 * Return Value: 1 if frame returned, otherwise 0
6618 */
6619static int mgsl_get_rx_frame(struct mgsl_struct *info)
6620{
6621 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6622 unsigned short status;
6623 DMABUFFERENTRY *pBufEntry;
6624 unsigned int framesize = 0;
6625 int ReturnCode = 0;
6626 unsigned long flags;
6627 struct tty_struct *tty = info->tty;
6628 int return_frame = 0;
6629
6630 /*
6631 * current_rx_buffer points to the 1st buffer of the next available
6632 * receive frame. To find the last buffer of the frame look for
6633 * a non-zero status field in the buffer entries. (The status
6634 * field is set by the 16C32 after completing a receive frame.
6635 */
6636
6637 StartIndex = EndIndex = info->current_rx_buffer;
6638
6639 while( !info->rx_buffer_list[EndIndex].status ) {
6640 /*
6641 * If the count field of the buffer entry is non-zero then
6642 * this buffer has not been used. (The 16C32 clears the count
6643 * field when it starts using the buffer.) If an unused buffer
6644 * is encountered then there are no frames available.
6645 */
6646
6647 if ( info->rx_buffer_list[EndIndex].count )
6648 goto Cleanup;
6649
6650 /* advance to next buffer entry in linked list */
6651 EndIndex++;
6652 if ( EndIndex == info->rx_buffer_count )
6653 EndIndex = 0;
6654
6655 /* if entire list searched then no frame available */
6656 if ( EndIndex == StartIndex ) {
6657 /* If this occurs then something bad happened,
6658 * all buffers have been 'used' but none mark
6659 * the end of a frame. Reset buffers and receiver.
6660 */
6661
6662 if ( info->rx_enabled ){
6663 spin_lock_irqsave(&info->irq_spinlock,flags);
6664 usc_start_receiver(info);
6665 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6666 }
6667 goto Cleanup;
6668 }
6669 }
6670
6671
6672 /* check status of receive frame */
6673
6674 status = info->rx_buffer_list[EndIndex].status;
6675
6676 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6677 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6678 if ( status & RXSTATUS_SHORT_FRAME )
6679 info->icount.rxshort++;
6680 else if ( status & RXSTATUS_ABORT )
6681 info->icount.rxabort++;
6682 else if ( status & RXSTATUS_OVERRUN )
6683 info->icount.rxover++;
6684 else {
6685 info->icount.rxcrc++;
6686 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6687 return_frame = 1;
6688 }
6689 framesize = 0;
6690#ifdef CONFIG_HDLC
6691 {
6692 struct net_device_stats *stats = hdlc_stats(info->netdev);
6693 stats->rx_errors++;
6694 stats->rx_frame_errors++;
6695 }
6696#endif
6697 } else
6698 return_frame = 1;
6699
6700 if ( return_frame ) {
6701 /* receive frame has no errors, get frame size.
6702 * The frame size is the starting value of the RCC (which was
6703 * set to 0xffff) minus the ending value of the RCC (decremented
6704 * once for each receive character) minus 2 for the 16-bit CRC.
6705 */
6706
6707 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6708
6709 /* adjust frame size for CRC if any */
6710 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6711 framesize -= 2;
6712 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6713 framesize -= 4;
6714 }
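	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * if the ending RCC is 0xffce, the frame occupied
	 * 0xffff - 0xffce = 49 bytes including the CRC; with HDLC_CRC_16_CCITT
	 * the returned framesize is 47, and with HDLC_CRC_32_CCITT it is 45.
	 */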
6715
6716 if ( debug_level >= DEBUG_LEVEL_BH )
6717 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6718 __FILE__,__LINE__,info->device_name,status,framesize);
6719
6720 if ( debug_level >= DEBUG_LEVEL_DATA )
6721 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6722 min_t(int, framesize, DMABUFFERSIZE),0);
6723
6724 if (framesize) {
6725 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6726 ((framesize+1) > info->max_frame_size) ) ||
6727 (framesize > info->max_frame_size) )
6728 info->icount.rxlong++;
6729 else {
6730 /* copy dma buffer(s) to contiguous intermediate buffer */
6731 int copy_count = framesize;
6732 int index = StartIndex;
6733 unsigned char *ptmp = info->intermediate_rxbuffer;
6734
6735 if ( !(status & RXSTATUS_CRC_ERROR))
6736 info->icount.rxok++;
6737
6738 while(copy_count) {
6739 int partial_count;
6740 if ( copy_count > DMABUFFERSIZE )
6741 partial_count = DMABUFFERSIZE;
6742 else
6743 partial_count = copy_count;
6744
6745 pBufEntry = &(info->rx_buffer_list[index]);
6746 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6747 ptmp += partial_count;
6748 copy_count -= partial_count;
6749
6750 if ( ++index == info->rx_buffer_count )
6751 index = 0;
6752 }
6753
6754 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6755 ++framesize;
6756 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6757 RX_CRC_ERROR :
6758 RX_OK);
6759
6760 if ( debug_level >= DEBUG_LEVEL_DATA )
6761 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6762 __FILE__,__LINE__,info->device_name,
6763 *ptmp);
6764 }
6765
6766#ifdef CONFIG_HDLC
6767 if (info->netcount)
6768 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6769 else
6770#endif
6771 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6772 }
6773 }
6774 /* Free the buffers used by this frame. */
6775 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6776
6777 ReturnCode = 1;
6778
6779Cleanup:
6780
6781 if ( info->rx_enabled && info->rx_overflow ) {
6782	/* The receiver needs to be restarted because of
6783 * a receive overflow (buffer or FIFO). If the
6784 * receive buffers are now empty, then restart receiver.
6785 */
6786
6787 if ( !info->rx_buffer_list[EndIndex].status &&
6788 info->rx_buffer_list[EndIndex].count ) {
6789 spin_lock_irqsave(&info->irq_spinlock,flags);
6790 usc_start_receiver(info);
6791 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6792 }
6793 }
6794
6795 return ReturnCode;
6796
6797} /* end of mgsl_get_rx_frame() */
6798
6799/* mgsl_get_raw_rx_frame()
6800 *
6801 * This function attempts to return a received frame from the
6802 * receive DMA buffers when running in external loop mode. In this mode,
6803 * we will return at most one DMABUFFERSIZE frame to the application.
6804 * The USC receiver triggers on DCD going active to start a new
6805 * frame, and DCD going inactive to terminate the frame (similar to
6806 * processing a closing flag character).
6807 *
6808 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6809 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6810 * status field and the RCC field will indicate the length of the
6811 * entire received frame. We take this RCC field and get the modulus
6812 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6813 * last Rx DMA buffer and return that last portion of the frame.
6814 *
6815 * Arguments: info pointer to device extension
6816 * Return Value: 1 if frame returned, otherwise 0
6817 */
6818static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6819{
6820 unsigned int CurrentIndex, NextIndex;
6821 unsigned short status;
6822 DMABUFFERENTRY *pBufEntry;
6823 unsigned int framesize = 0;
6824 int ReturnCode = 0;
6825 unsigned long flags;
6826 struct tty_struct *tty = info->tty;
6827
6828 /*
6829 * current_rx_buffer points to the 1st buffer of the next available
6830 * receive frame. The status field is set by the 16C32 after
6831 * completing a receive frame. If the status field of this buffer
6832 * is zero, either the USC is still filling this buffer or this
6833 * is one of a series of buffers making up a received frame.
6834 *
6835 * If the count field of this buffer is zero, the USC is either
6836 * using this buffer or has used this buffer. Look at the count
6837 * field of the next buffer. If that next buffer's count is
6838 * non-zero, the USC is still actively using the current buffer.
6839 * Otherwise, if the next buffer's count field is zero, the
6840 * current buffer is complete and the USC is using the next
6841 * buffer.
6842 */
6843 CurrentIndex = NextIndex = info->current_rx_buffer;
6844 ++NextIndex;
6845 if ( NextIndex == info->rx_buffer_count )
6846 NextIndex = 0;
6847
6848 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6849 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6850 info->rx_buffer_list[NextIndex].count == 0)) {
6851 /*
6852 * Either the status field of this dma buffer is non-zero
6853 * (indicating the last buffer of a receive frame) or the next
6854 * buffer is marked as in use -- implying this buffer is complete
6855	 * and is an intermediate buffer for this received frame.
6856 */
6857
6858 status = info->rx_buffer_list[CurrentIndex].status;
6859
6860 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6861 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6862 if ( status & RXSTATUS_SHORT_FRAME )
6863 info->icount.rxshort++;
6864 else if ( status & RXSTATUS_ABORT )
6865 info->icount.rxabort++;
6866 else if ( status & RXSTATUS_OVERRUN )
6867 info->icount.rxover++;
6868 else
6869 info->icount.rxcrc++;
6870 framesize = 0;
6871 } else {
6872 /*
6873 * A receive frame is available, get frame size and status.
6874 *
6875 * The frame size is the starting value of the RCC (which was
6876 * set to 0xffff) minus the ending value of the RCC (decremented
6877 * once for each receive character) minus 2 or 4 for the 16-bit
6878 * or 32-bit CRC.
6879 *
6880 * If the status field is zero, this is an intermediate buffer.
6881	 * Its size is 4K.
6882 *
6883 * If the DMA Buffer Entry's Status field is non-zero, the
6884 * receive operation completed normally (ie: DCD dropped). The
6885 * RCC field is valid and holds the received frame size.
6886 * It is possible that the RCC field will be zero on a DMA buffer
6887 * entry with a non-zero status. This can occur if the total
6888 * frame size (number of bytes between the time DCD goes active
6889 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6890 * case the 16C32 has underrun on the RCC count and appears to
6891 * stop updating this counter to let us know the actual received
6892 * frame size. If this happens (non-zero status and zero RCC),
6893 * simply return the entire RxDMA Buffer
6894 */
6895 if ( status ) {
6896 /*
6897 * In the event that the final RxDMA Buffer is
6898 * terminated with a non-zero status and the RCC
6899 * field is zero, we interpret this as the RCC
6900 * having underflowed (received frame > 65535 bytes).
6901 *
6902 * Signal the event to the user by passing back
6903 * a status of RxStatus_CrcError returning the full
6904 * buffer and let the app figure out what data is
6905 * actually valid
6906 */
6907 if ( info->rx_buffer_list[CurrentIndex].rcc )
6908 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6909 else
6910 framesize = DMABUFFERSIZE;
6911 }
6912 else
6913 framesize = DMABUFFERSIZE;
6914 }
6915
6916 if ( framesize > DMABUFFERSIZE ) {
6917 /*
6918 * if running in raw sync mode, ISR handler for
6919 * End Of Buffer events terminates all buffers at 4K.
6920 * If this frame size is said to be >4K, get the
6921 * actual number of bytes of the frame in this buffer.
6922 */
6923 framesize = framesize % DMABUFFERSIZE;
6924 }
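		/*
		 * Illustrative note (editor's example, not part of the original
		 * code): if RCC reports a 10000 byte frame with 4K (4096 byte)
		 * DMA buffers, 10000 % 4096 = 1808 bytes remain in this final
		 * buffer and are returned to the application.
		 */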
6925
6926
6927 if ( debug_level >= DEBUG_LEVEL_BH )
6928 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6929 __FILE__,__LINE__,info->device_name,status,framesize);
6930
6931 if ( debug_level >= DEBUG_LEVEL_DATA )
6932 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6933 min_t(int, framesize, DMABUFFERSIZE),0);
6934
6935 if (framesize) {
6936 /* copy dma buffer(s) to contiguous intermediate buffer */
6937 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6938
6939 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6940 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6941 info->icount.rxok++;
6942
6943 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6944 }
6945
6946 /* Free the buffers used by this frame. */
6947 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6948
6949 ReturnCode = 1;
6950 }
6951
6952
6953 if ( info->rx_enabled && info->rx_overflow ) {
6954	/* The receiver needs to be restarted because of
6955 * a receive overflow (buffer or FIFO). If the
6956 * receive buffers are now empty, then restart receiver.
6957 */
6958
6959 if ( !info->rx_buffer_list[CurrentIndex].status &&
6960 info->rx_buffer_list[CurrentIndex].count ) {
6961 spin_lock_irqsave(&info->irq_spinlock,flags);
6962 usc_start_receiver(info);
6963 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6964 }
6965 }
6966
6967 return ReturnCode;
6968
6969} /* end of mgsl_get_raw_rx_frame() */
6970
6971/* mgsl_load_tx_dma_buffer()
6972 *
6973 * Load the transmit DMA buffer with the specified data.
6974 *
6975 * Arguments:
6976 *
6977 * info pointer to device extension
6978 * Buffer pointer to buffer containing frame to load
6979 * BufferSize size in bytes of frame in Buffer
6980 *
6981 * Return Value: None
6982 */
6983static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6984 const char *Buffer, unsigned int BufferSize)
6985{
6986 unsigned short Copycount;
6987 unsigned int i = 0;
6988 DMABUFFERENTRY *pBufEntry;
6989
6990 if ( debug_level >= DEBUG_LEVEL_DATA )
6991 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6992
6993 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6994 /* set CMR:13 to start transmit when
6995 * next GoAhead (abort) is received
6996 */
6997 info->cmr_value |= BIT13;
6998 }
6999
7000 /* begin loading the frame in the next available tx dma
7001 * buffer, remember it's starting location for setting
7002 * up tx dma operation
7003 */
7004 i = info->current_tx_buffer;
7005 info->start_tx_dma_buffer = i;
7006
7007 /* Setup the status and RCC (Frame Size) fields of the 1st */
7008 /* buffer entry in the transmit DMA buffer list. */
7009
7010 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
7011 info->tx_buffer_list[i].rcc = BufferSize;
7012 info->tx_buffer_list[i].count = BufferSize;
7013
7014 /* Copy frame data from 1st source buffer to the DMA buffers. */
7015 /* The frame data may span multiple DMA buffers. */
7016
7017 while( BufferSize ){
7018 /* Get a pointer to next DMA buffer entry. */
7019 pBufEntry = &info->tx_buffer_list[i++];
7020
7021 if ( i == info->tx_buffer_count )
7022 i=0;
7023
7024 /* Calculate the number of bytes that can be copied from */
7025 /* the source buffer to this DMA buffer. */
7026 if ( BufferSize > DMABUFFERSIZE )
7027 Copycount = DMABUFFERSIZE;
7028 else
7029 Copycount = BufferSize;
7030
7031 /* Actually copy data from source buffer to DMA buffer. */
7032 /* Also set the data count for this individual DMA buffer. */
7033 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
7034 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
7035 else
7036 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
7037
7038 pBufEntry->count = Copycount;
7039
7040 /* Advance source pointer and reduce remaining data count. */
7041 Buffer += Copycount;
7042 BufferSize -= Copycount;
7043
7044 ++info->tx_dma_buffers_used;
7045 }
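	/*
	 * Illustrative note (editor's example, not part of the original code):
	 * a 10000 byte frame with 4096 byte DMA buffers is split by the loop
	 * above into three entries of 4096, 4096 and 1808 bytes, and
	 * tx_dma_buffers_used advances by three.
	 */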
7046
7047 /* remember next available tx dma buffer */
7048 info->current_tx_buffer = i;
7049
7050} /* end of mgsl_load_tx_dma_buffer() */
7051
7052/*
7053 * mgsl_register_test()
7054 *
7055 * Performs a register test of the 16C32.
7056 *
7057 * Arguments: info pointer to device instance data
7058 * Return Value: TRUE if test passed, otherwise FALSE
7059 */
7060static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7061{
7062 static unsigned short BitPatterns[] =
7063 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7064 static unsigned int Patterncount = sizeof(BitPatterns)/sizeof(unsigned short);
7065 unsigned int i;
7066 BOOLEAN rc = TRUE;
7067 unsigned long flags;
7068
7069 spin_lock_irqsave(&info->irq_spinlock,flags);
7070 usc_reset(info);
7071
7072 /* Verify the reset state of some registers. */
7073
7074 if ( (usc_InReg( info, SICR ) != 0) ||
7075 (usc_InReg( info, IVR ) != 0) ||
7076 (usc_InDmaReg( info, DIVR ) != 0) ){
7077 rc = FALSE;
7078 }
7079
7080 if ( rc == TRUE ){
7081 /* Write bit patterns to various registers but do it out of */
7082 /* sync, then read back and verify values. */
7083
7084 for ( i = 0 ; i < Patterncount ; i++ ) {
7085 usc_OutReg( info, TC0R, BitPatterns[i] );
7086 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7087 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7088 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7089 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7090 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7091
7092 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7093 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7094 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7095 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7096 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7097 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7098 rc = FALSE;
7099 break;
7100 }
7101 }
7102 }
7103
7104 usc_reset(info);
7105 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7106
7107 return rc;
7108
7109} /* end of mgsl_register_test() */
7110
7111/* mgsl_irq_test() Perform interrupt test of the 16C32.
7112 *
7113 * Arguments: info pointer to device instance data
7114 * Return Value: TRUE if test passed, otherwise FALSE
7115 */
7116static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7117{
7118 unsigned long EndTime;
7119 unsigned long flags;
7120
7121 spin_lock_irqsave(&info->irq_spinlock,flags);
7122 usc_reset(info);
7123
7124 /*
7125 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7126 * The ISR sets irq_occurred to 1.
7127 */
7128
7129 info->irq_occurred = FALSE;
7130
7131	/* Enable the INTEN gate for the ISA adapter (Port 6, Bit12). */
7132	/* This connects the IRQ request signal to the ISA bus; */
7133	/* it has no effect for the PCI adapter. */
7135 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7136
7137 usc_EnableMasterIrqBit(info);
7138 usc_EnableInterrupts(info, IO_PIN);
7139 usc_ClearIrqPendingBits(info, IO_PIN);
7140
7141 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7142 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7143
7144 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7145
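	/* poll up to 100 iterations of 10ms (about one second) for the ISR to set irq_occurred */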
7146 EndTime=100;
7147 while( EndTime-- && !info->irq_occurred ) {
7148 msleep_interruptible(10);
7149 }
7150
7151 spin_lock_irqsave(&info->irq_spinlock,flags);
7152 usc_reset(info);
7153 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7154
7155 if ( !info->irq_occurred )
7156 return FALSE;
7157 else
7158 return TRUE;
7159
7160} /* end of mgsl_irq_test() */
7161
7162/* mgsl_dma_test()
7163 *
7164 * Perform a DMA test of the 16C32. A small frame is
7165 * transmitted via DMA from a transmit buffer to a receive buffer
7166 * using single buffer DMA mode.
7167 *
7168 * Arguments: info pointer to device instance data
7169 * Return Value: TRUE if test passed, otherwise FALSE
7170 */
7171static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7172{
7173 unsigned short FifoLevel;
7174 unsigned long phys_addr;
7175 unsigned int FrameSize;
7176 unsigned int i;
7177 char *TmpPtr;
7178 BOOLEAN rc = TRUE;
7179 unsigned short status=0;
7180 unsigned long EndTime;
7181 unsigned long flags;
7182 MGSL_PARAMS tmp_params;
7183
7184 /* save current port options */
7185 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7186 /* load default port options */
7187 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7188
7189#define TESTFRAMESIZE 40
7190
7191 spin_lock_irqsave(&info->irq_spinlock,flags);
7192
7193 /* setup 16C32 for SDLC DMA transfer mode */
7194
7195 usc_reset(info);
7196 usc_set_sdlc_mode(info);
7197 usc_enable_loopback(info,1);
7198
7199 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7200 * field of the buffer entry after fetching buffer address. This
7201 * way we can detect a DMA failure for a DMA read (which should be
7202	 * non-destructive to system memory) before we try to write to
7203 * memory (where a failure could corrupt system memory).
7204 */
7205
7206 /* Receive DMA mode Register (RDMR)
7207 *
7208 * <15..14> 11 DMA mode = Linked List Buffer mode
7209 * <13> 1 RSBinA/L = store Rx status Block in List entry
7210 * <12> 0 1 = Clear count of List Entry after fetching
7211 * <11..10> 00 Address mode = Increment
7212 * <9> 1 Terminate Buffer on RxBound
7213 * <8> 0 Bus Width = 16bits
7214 * <7..0> ? status Bits (write as 0s)
7215 *
7216 * 1110 0010 0000 0000 = 0xe200
7217 */
7218
7219 usc_OutDmaReg( info, RDMR, 0xe200 );
7220
7221 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7222
7223
7224 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7225
7226 FrameSize = TESTFRAMESIZE;
7227
7228 /* setup 1st transmit buffer entry: */
7229 /* with frame size and transmit control word */
7230
7231 info->tx_buffer_list[0].count = FrameSize;
7232 info->tx_buffer_list[0].rcc = FrameSize;
7233 info->tx_buffer_list[0].status = 0x4000;
7234
7235 /* build a transmit frame in 1st transmit DMA buffer */
7236
7237 TmpPtr = info->tx_buffer_list[0].virt_addr;
7238 for (i = 0; i < FrameSize; i++ )
7239 *TmpPtr++ = i;
7240
7241 /* setup 1st receive buffer entry: */
7242 /* clear status, set max receive buffer size */
7243
7244 info->rx_buffer_list[0].status = 0;
7245 info->rx_buffer_list[0].count = FrameSize + 4;
7246
7247 /* zero out the 1st receive buffer */
7248
7249 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7250
7251	/* Set count field of next buffer entries to zero to prevent */
7252	/* the 16C32 from using buffers after the 1st one. */
7253
7254 info->tx_buffer_list[1].count = 0;
7255 info->rx_buffer_list[1].count = 0;
7256
7257
7258 /***************************/
7259 /* Program 16C32 receiver. */
7260 /***************************/
7261
7262 spin_lock_irqsave(&info->irq_spinlock,flags);
7263
7264 /* setup DMA transfers */
7265 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7266
7267 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7268 phys_addr = info->rx_buffer_list[0].phys_entry;
7269 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7270 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7271
7272 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7273 usc_InDmaReg( info, RDMR );
7274 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7275
7276 /* Enable Receiver (RMR <1..0> = 10) */
7277 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7278
7279 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7280
7281
7282 /*************************************************************/
7283 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7284 /*************************************************************/
7285
7286	/* Wait up to 100ms for the buffer entry fetch to complete. */
7287 EndTime = jiffies + msecs_to_jiffies(100);
7288
7289 for(;;) {
7290 if (time_after(jiffies, EndTime)) {
7291 rc = FALSE;
7292 break;
7293 }
7294
7295 spin_lock_irqsave(&info->irq_spinlock,flags);
7296 status = usc_InDmaReg( info, RDMR );
7297 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7298
7299 if ( !(status & BIT4) && (status & BIT5) ) {
7300 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7301 /* BUSY (BIT 5) is active (channel still active). */
7302 /* This means the buffer entry read has completed. */
7303 break;
7304 }
7305 }
7306
7307
7308 /******************************/
7309 /* Program 16C32 transmitter. */
7310 /******************************/
7311
7312 spin_lock_irqsave(&info->irq_spinlock,flags);
7313
7314 /* Program the Transmit Character Length Register (TCLR) */
7315 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7316
7317 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7318 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7319
7320 /* Program the address of the 1st DMA Buffer Entry in linked list */
7321
7322 phys_addr = info->tx_buffer_list[0].phys_entry;
7323 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7324 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7325
7326 /* unlatch Tx status bits, and start transmit channel. */
7327
7328 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7329 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7330
7331 /* wait for DMA controller to fill transmit FIFO */
7332
7333 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7334
7335 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7336
7337
7338 /**********************************/
7339 /* WAIT FOR TRANSMIT FIFO TO FILL */
7340 /**********************************/
7341
7342 /* Wait 100ms */
7343 EndTime = jiffies + msecs_to_jiffies(100);
7344
7345 for(;;) {
7346 if (time_after(jiffies, EndTime)) {
7347 rc = FALSE;
7348 break;
7349 }
7350
7351 spin_lock_irqsave(&info->irq_spinlock,flags);
7352 FifoLevel = usc_InReg(info, TICR) >> 8;
7353 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7354
7355 if ( FifoLevel < 16 )
7356 break;
7357 else
7358 if ( FrameSize < 32 ) {
7359 /* This frame is smaller than the entire transmit FIFO */
7360 /* so wait for the entire frame to be loaded. */
7361 if ( FifoLevel <= (32 - FrameSize) )
7362 break;
7363 }
7364 }
7365
7366
7367 if ( rc == TRUE )
7368 {
7369 /* Enable 16C32 transmitter. */
7370
7371 spin_lock_irqsave(&info->irq_spinlock,flags);
7372
7373 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7374 usc_TCmd( info, TCmd_SendFrame );
7375 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7376
7377 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7378
7379
7380 /******************************/
7381 /* WAIT FOR TRANSMIT COMPLETE */
7382 /******************************/
7383
7384 /* Wait 100ms */
7385 EndTime = jiffies + msecs_to_jiffies(100);
7386
7387 /* While timer not expired wait for transmit complete */
7388
7389 spin_lock_irqsave(&info->irq_spinlock,flags);
7390 status = usc_InReg( info, TCSR );
7391 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7392
7393 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7394 if (time_after(jiffies, EndTime)) {
7395 rc = FALSE;
7396 break;
7397 }
7398
7399 spin_lock_irqsave(&info->irq_spinlock,flags);
7400 status = usc_InReg( info, TCSR );
7401 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7402 }
7403 }
7404
7405
7406 if ( rc == TRUE ){
7407 /* CHECK FOR TRANSMIT ERRORS */
7408 if ( status & (BIT5 + BIT1) )
7409 rc = FALSE;
7410 }
7411
7412 if ( rc == TRUE ) {
7413 /* WAIT FOR RECEIVE COMPLETE */
7414
7415 /* Wait 100ms */
7416 EndTime = jiffies + msecs_to_jiffies(100);
7417
7418 /* Wait for 16C32 to write receive status to buffer entry. */
7419 status=info->rx_buffer_list[0].status;
7420 while ( status == 0 ) {
7421 if (time_after(jiffies, EndTime)) {
7422 rc = FALSE;
7423 break;
7424 }
7425 status=info->rx_buffer_list[0].status;
7426 }
7427 }
7428
7429
7430 if ( rc == TRUE ) {
7431 /* CHECK FOR RECEIVE ERRORS */
7432 status = info->rx_buffer_list[0].status;
7433
7434 if ( status & (BIT8 + BIT3 + BIT1) ) {
7435 /* receive error has occurred */
7436 rc = FALSE;
7437 } else {
7438 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7439 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7440 rc = FALSE;
7441 }
7442 }
7443 }
7444
7445 spin_lock_irqsave(&info->irq_spinlock,flags);
7446 usc_reset( info );
7447 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7448
7449 /* restore current port options */
7450 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7451
7452 return rc;
7453
7454} /* end of mgsl_dma_test() */
7455
7456/* mgsl_adapter_test()
7457 *
7458 * Perform the register, IRQ, and DMA tests for the 16C32.
7459 *
7460 * Arguments: info pointer to device instance data
7461 * Return Value: 0 if success, otherwise -ENODEV
7462 */
7463static int mgsl_adapter_test( struct mgsl_struct *info )
7464{
7465 if ( debug_level >= DEBUG_LEVEL_INFO )
7466 printk( "%s(%d):Testing device %s\n",
7467 __FILE__,__LINE__,info->device_name );
7468
7469 if ( !mgsl_register_test( info ) ) {
7470 info->init_error = DiagStatus_AddressFailure;
7471 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7472 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7473 return -ENODEV;
7474 }
7475
7476 if ( !mgsl_irq_test( info ) ) {
7477 info->init_error = DiagStatus_IrqFailure;
7478 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7479 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7480 return -ENODEV;
7481 }
7482
7483 if ( !mgsl_dma_test( info ) ) {
7484 info->init_error = DiagStatus_DmaFailure;
7485 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7486 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7487 return -ENODEV;
7488 }
7489
7490 if ( debug_level >= DEBUG_LEVEL_INFO )
7491 printk( "%s(%d):device %s passed diagnostics\n",
7492 __FILE__,__LINE__,info->device_name );
7493
7494 return 0;
7495
7496} /* end of mgsl_adapter_test() */
7497
7498/* mgsl_memory_test()
7499 *
7500 * Test the shared memory on a PCI adapter.
7501 *
7502 * Arguments: info pointer to device instance data
7503 * Return Value: TRUE if test passed, otherwise FALSE
7504 */
7505static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7506{
7507 static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa,
7508 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7509 unsigned long Patterncount = sizeof(BitPatterns)/sizeof(unsigned long);
7510 unsigned long i;
7511 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7512 unsigned long * TestAddr;
7513
7514 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7515 return TRUE;
7516
7517 TestAddr = (unsigned long *)info->memory_base;
7518
7519 /* Test data lines with test pattern at one location. */
7520
7521 for ( i = 0 ; i < Patterncount ; i++ ) {
7522 *TestAddr = BitPatterns[i];
7523 if ( *TestAddr != BitPatterns[i] )
7524 return FALSE;
7525 }
7526
7527 /* Test address lines with incrementing pattern over */
7528 /* entire address range. */
7529
7530 for ( i = 0 ; i < TestLimit ; i++ ) {
7531 *TestAddr = i * 4;
7532 TestAddr++;
7533 }
7534
7535 TestAddr = (unsigned long *)info->memory_base;
7536
7537 for ( i = 0 ; i < TestLimit ; i++ ) {
7538 if ( *TestAddr != i * 4 )
7539 return FALSE;
7540 TestAddr++;
7541 }
7542
7543 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7544
7545 return TRUE;
7546
7547} /* End Of mgsl_memory_test() */
7548
7549
7550/* mgsl_load_pci_memory()
7551 *
7552 * Load a large block of data into the PCI shared memory.
7553 * Use this instead of memcpy() or memmove() to move data
7554 * into the PCI shared memory.
7555 *
7556 * Notes:
7557 *
7558 * This function prevents the PCI9050 interface chip from hogging
7559 * the adapter local bus, which can starve the 16C32 by preventing
7560 * 16C32 bus master cycles.
7561 *
7562 * The PCI9050 documentation says that the 9050 will always release
7563 * control of the local bus after completing the current read
7564 * or write operation.
7565 *
7566 * It appears that as long as the PCI9050 write FIFO is full, the
7567 * PCI9050 treats all of the writes as a single burst transaction
7568 * and will not release the bus. This causes DMA latency problems
7569 * at high speeds when copying large data blocks to the shared
7570 * memory.
7571 *
7572 * This function, in effect, breaks a large shared memory write
7573 * into multiple transactions by interleaving a shared memory read,
7574 * which flushes the write FIFO and 'completes' the write
7575 * transaction. This allows any pending DMA request to gain control
7576 * of the local bus in a timely fashion.
7577 *
7578 * Arguments:
7579 *
7580 * TargetPtr pointer to target address in PCI shared memory
7581 * SourcePtr pointer to source buffer for data
7582 * count count in bytes of data to copy
7583 *
7584 * Return Value: None
7585 */
7586static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7587 unsigned short count )
7588{
7589 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7590#define PCI_LOAD_INTERVAL 64
7591
7592 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7593 unsigned short Index;
7594 unsigned long Dummy;
7595
7596 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7597 {
7598 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
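		/* Dummy read flushes the PCI9050 write FIFO so the burst completes, */
		/* allowing a pending 16C32 DMA request to gain the local bus. */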
7599 Dummy = *((volatile unsigned long *)TargetPtr);
7600 TargetPtr += PCI_LOAD_INTERVAL;
7601 SourcePtr += PCI_LOAD_INTERVAL;
7602 }
7603
7604 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7605
7606} /* End Of mgsl_load_pci_memory() */
7607
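/* mgsl_trace_block()
 *
 * 	Output a hex/ASCII dump of a data buffer to the kernel log
 * 	for debugging, 16 bytes per line.
 *
 * Arguments:	info	pointer to device instance data
 * 		data	pointer to data buffer
 * 		count	count of bytes in buffer
 * 		xmit	non-zero for transmit data, zero for receive data
 *
 * Return Value:	None
 */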
7608static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7609{
7610 int i;
7611 int linecount;
7612 if (xmit)
7613 printk("%s tx data:\n",info->device_name);
7614 else
7615 printk("%s rx data:\n",info->device_name);
7616
7617 while(count) {
7618 if (count > 16)
7619 linecount = 16;
7620 else
7621 linecount = count;
7622
7623 for(i=0;i<linecount;i++)
7624 printk("%02X ",(unsigned char)data[i]);
7625 for(;i<17;i++)
7626 printk(" ");
7627 for(i=0;i<linecount;i++) {
7628 if (data[i]>=040 && data[i]<=0176)
7629 printk("%c",data[i]);
7630 else
7631 printk(".");
7632 }
7633 printk("\n");
7634
7635 data += linecount;
7636 count -= linecount;
7637 }
7638} /* end of mgsl_trace_block() */
7639
7640/* mgsl_tx_timeout()
7641 *
7642 * called when HDLC frame times out
7643 * update stats and do tx completion processing
7644 *
7645 * Arguments: context pointer to device instance data
7646 * Return Value: None
7647 */
7648static void mgsl_tx_timeout(unsigned long context)
7649{
7650 struct mgsl_struct *info = (struct mgsl_struct*)context;
7651 unsigned long flags;
7652
7653 if ( debug_level >= DEBUG_LEVEL_INFO )
7654 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7655 __FILE__,__LINE__,info->device_name);
7656 if(info->tx_active &&
7657 (info->params.mode == MGSL_MODE_HDLC ||
7658 info->params.mode == MGSL_MODE_RAW) ) {
7659 info->icount.txtimeout++;
7660 }
7661 spin_lock_irqsave(&info->irq_spinlock,flags);
7662 info->tx_active = 0;
7663 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7664
7665 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7666 usc_loopmode_cancel_transmit( info );
7667
7668 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7669
7670#ifdef CONFIG_HDLC
7671 if (info->netcount)
7672 hdlcdev_tx_done(info);
7673 else
7674#endif
7675 mgsl_bh_transmit(info);
7676
7677} /* end of mgsl_tx_timeout() */
7678
7679/* signal that there are no more frames to send, so that
7680 * line is 'released' by echoing RxD to TxD when current
7681 * transmission is complete (or immediately if no tx in progress).
7682 */
7683static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7684{
7685 unsigned long flags;
7686
7687 spin_lock_irqsave(&info->irq_spinlock,flags);
7688 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7689 if (info->tx_active)
7690 info->loopmode_send_done_requested = TRUE;
7691 else
7692 usc_loopmode_send_done(info);
7693 }
7694 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7695
7696 return 0;
7697}
7698
7699/* release the line by echoing RxD to TxD
7700 * upon completion of a transmit frame
7701 */
7702static void usc_loopmode_send_done( struct mgsl_struct * info )
7703{
7704 info->loopmode_send_done_requested = FALSE;
7705 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7706 info->cmr_value &= ~BIT13;
7707 usc_OutReg(info, CMR, info->cmr_value);
7708}
7709
7710/* abort a transmit in progress while in HDLC LoopMode
7711 */
7712static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7713{
7714 /* reset tx dma channel and purge TxFifo */
7715 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7716 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7717 usc_loopmode_send_done( info );
7718}
7719
7720/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7721 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7722 * we must clear CMR:13 to begin repeating TxData to RxData
7723 */
7724static void usc_loopmode_insert_request( struct mgsl_struct * info )
7725{
7726 info->loopmode_insert_requested = TRUE;
7727
7728 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7729 * begin repeating TxData on RxData (complete insertion)
7730 */
7731 usc_OutReg( info, RICR,
7732 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7733
7734 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7735 info->cmr_value |= BIT13;
7736 usc_OutReg(info, CMR, info->cmr_value);
7737}
7738
7739/* return 1 if station is inserted into the loop, otherwise 0
7740 */
7741static int usc_loopmode_active( struct mgsl_struct * info)
7742{
7743 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7744}
7745
7746#ifdef CONFIG_HDLC
7747
7748/**
7749 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7750 * set encoding and frame check sequence (FCS) options
7751 *
7752 * dev pointer to network device structure
7753 * encoding serial encoding setting
7754 * parity FCS setting
7755 *
7756 * returns 0 if success, otherwise error code
7757 */
7758static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7759 unsigned short parity)
7760{
7761 struct mgsl_struct *info = dev_to_port(dev);
7762 unsigned char new_encoding;
7763 unsigned short new_crctype;
7764
7765 /* return error if TTY interface open */
7766 if (info->count)
7767 return -EBUSY;
7768
7769 switch (encoding)
7770 {
7771 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7772 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7773 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7774 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7775 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7776 default: return -EINVAL;
7777 }
7778
7779 switch (parity)
7780 {
7781 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7782 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7783 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7784 default: return -EINVAL;
7785 }
7786
7787 info->params.encoding = new_encoding;
7788	info->params.crc_type = new_crctype;
7789
7790 /* if network interface up, reprogram hardware */
7791 if (info->netcount)
7792 mgsl_program_hw(info);
7793
7794 return 0;
7795}
7796
7797/**
7798 * called by generic HDLC layer to send frame
7799 *
7800 * skb socket buffer containing HDLC frame
7801 * dev pointer to network device structure
7802 *
7803 * returns 0 if success, otherwise error code
7804 */
7805static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7806{
7807 struct mgsl_struct *info = dev_to_port(dev);
7808 struct net_device_stats *stats = hdlc_stats(dev);
7809 unsigned long flags;
7810
7811 if (debug_level >= DEBUG_LEVEL_INFO)
7812 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7813
7814 /* stop sending until this frame completes */
7815 netif_stop_queue(dev);
7816
7817 /* copy data to device buffers */
7818 info->xmit_cnt = skb->len;
7819 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7820
7821 /* update network statistics */
7822 stats->tx_packets++;
7823 stats->tx_bytes += skb->len;
7824
7825 /* done with socket buffer, so free it */
7826 dev_kfree_skb(skb);
7827
7828 /* save start time for transmit timeout detection */
7829 dev->trans_start = jiffies;
7830
7831 /* start hardware transmitter if necessary */
7832 spin_lock_irqsave(&info->irq_spinlock,flags);
7833 if (!info->tx_active)
7834 usc_start_transmitter(info);
7835 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7836
7837 return 0;
7838}
7839
7840/**
7841 * called by network layer when interface enabled
7842 * claim resources and initialize hardware
7843 *
7844 * dev pointer to network device structure
7845 *
7846 * returns 0 if success, otherwise error code
7847 */
7848static int hdlcdev_open(struct net_device *dev)
7849{
7850 struct mgsl_struct *info = dev_to_port(dev);
7851 int rc;
7852 unsigned long flags;
7853
7854 if (debug_level >= DEBUG_LEVEL_INFO)
7855 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7856
7857 /* generic HDLC layer open processing */
7858 if ((rc = hdlc_open(dev)))
7859 return rc;
7860
7861 /* arbitrate between network and tty opens */
7862 spin_lock_irqsave(&info->netlock, flags);
7863 if (info->count != 0 || info->netcount != 0) {
7864 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7865 spin_unlock_irqrestore(&info->netlock, flags);
7866 return -EBUSY;
7867 }
7868 info->netcount=1;
7869 spin_unlock_irqrestore(&info->netlock, flags);
7870
7871 /* claim resources and init adapter */
7872 if ((rc = startup(info)) != 0) {
7873 spin_lock_irqsave(&info->netlock, flags);
7874 info->netcount=0;
7875 spin_unlock_irqrestore(&info->netlock, flags);
7876 return rc;
7877 }
7878
7879 /* assert DTR and RTS, apply hardware settings */
7880 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7881 mgsl_program_hw(info);
7882
7883 /* enable network layer transmit */
7884 dev->trans_start = jiffies;
7885 netif_start_queue(dev);
7886
7887 /* inform generic HDLC layer of current DCD status */
7888 spin_lock_irqsave(&info->irq_spinlock, flags);
7889 usc_get_serial_signals(info);
7890 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7891 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7892
7893 return 0;
7894}
7895
7896/**
7897 * called by network layer when interface is disabled
7898 * shutdown hardware and release resources
7899 *
7900 * dev pointer to network device structure
7901 *
7902 * returns 0 if success, otherwise error code
7903 */
7904static int hdlcdev_close(struct net_device *dev)
7905{
7906 struct mgsl_struct *info = dev_to_port(dev);
7907 unsigned long flags;
7908
7909 if (debug_level >= DEBUG_LEVEL_INFO)
7910 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7911
7912 netif_stop_queue(dev);
7913
7914 /* shutdown adapter and release resources */
7915 shutdown(info);
7916
7917 hdlc_close(dev);
7918
7919 spin_lock_irqsave(&info->netlock, flags);
7920 info->netcount=0;
7921 spin_unlock_irqrestore(&info->netlock, flags);
7922
7923 return 0;
7924}
7925
7926/**
7927 * called by network layer to process IOCTL call to network device
7928 *
7929 * dev pointer to network device structure
7930 * ifr pointer to network interface request structure
7931 * cmd IOCTL command code
7932 *
7933 * returns 0 if success, otherwise error code
7934 */
7935static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7936{
7937 const size_t size = sizeof(sync_serial_settings);
7938 sync_serial_settings new_line;
7939 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7940 struct mgsl_struct *info = dev_to_port(dev);
7941 unsigned int flags;
7942
7943 if (debug_level >= DEBUG_LEVEL_INFO)
7944 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7945
7946 /* return error if TTY interface open */
7947 if (info->count)
7948 return -EBUSY;
7949
7950 if (cmd != SIOCWANDEV)
7951 return hdlc_ioctl(dev, ifr, cmd);
7952
7953 switch(ifr->ifr_settings.type) {
7954 case IF_GET_IFACE: /* return current sync_serial_settings */
7955
7956 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7957 if (ifr->ifr_settings.size < size) {
7958 ifr->ifr_settings.size = size; /* data size wanted */
7959 return -ENOBUFS;
7960 }
7961
7962 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7963 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7964 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7965 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7966
7967 switch (flags){
7968 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7969 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7970 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7971 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7972 default: new_line.clock_type = CLOCK_DEFAULT;
7973 }
7974
7975 new_line.clock_rate = info->params.clock_speed;
7976 new_line.loopback = info->params.loopback ? 1:0;
7977
7978 if (copy_to_user(line, &new_line, size))
7979 return -EFAULT;
7980 return 0;
7981
7982 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7983
7984 if(!capable(CAP_NET_ADMIN))
7985 return -EPERM;
7986 if (copy_from_user(&new_line, line, size))
7987 return -EFAULT;
7988
7989 switch (new_line.clock_type)
7990 {
7991 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7992 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7993 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7994 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7995 case CLOCK_DEFAULT: flags = info->params.flags &
7996 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7997 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7998 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7999 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
8000 default: return -EINVAL;
8001 }
8002
8003 if (new_line.loopback != 0 && new_line.loopback != 1)
8004 return -EINVAL;
8005
8006 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
8007 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
8008 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
8009 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
8010 info->params.flags |= flags;
8011
8012 info->params.loopback = new_line.loopback;
8013
8014 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
8015 info->params.clock_speed = new_line.clock_rate;
8016 else
8017 info->params.clock_speed = 0;
8018
8019 /* if network interface up, reprogram hardware */
8020 if (info->netcount)
8021 mgsl_program_hw(info);
8022 return 0;
8023
8024 default:
8025 return hdlc_ioctl(dev, ifr, cmd);
8026 }
8027}
8028
8029/**
8030 * called by network layer when transmit timeout is detected
8031 *
8032 * dev pointer to network device structure
8033 */
8034static void hdlcdev_tx_timeout(struct net_device *dev)
8035{
8036 struct mgsl_struct *info = dev_to_port(dev);
8037 struct net_device_stats *stats = hdlc_stats(dev);
8038 unsigned long flags;
8039
8040 if (debug_level >= DEBUG_LEVEL_INFO)
8041 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8042
8043 stats->tx_errors++;
8044 stats->tx_aborted_errors++;
8045
8046 spin_lock_irqsave(&info->irq_spinlock,flags);
8047 usc_stop_transmitter(info);
8048 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8049
8050 netif_wake_queue(dev);
8051}
8052
8053/**
8054 * called by device driver when transmit completes
8055 * reenable network layer transmit if stopped
8056 *
8057 * info pointer to device instance information
8058 */
8059static void hdlcdev_tx_done(struct mgsl_struct *info)
8060{
8061 if (netif_queue_stopped(info->netdev))
8062 netif_wake_queue(info->netdev);
8063}
8064
8065/**
8066 * called by device driver when frame received
8067 * pass frame to network layer
8068 *
8069 * info pointer to device instance information
8070 * buf   pointer to buffer containing frame data
8071 * size count of data bytes in buf
8072 */
8073static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8074{
8075 struct sk_buff *skb = dev_alloc_skb(size);
8076 struct net_device *dev = info->netdev;
8077 struct net_device_stats *stats = hdlc_stats(dev);
8078
8079 if (debug_level >= DEBUG_LEVEL_INFO)
8080 printk("hdlcdev_rx(%s)\n",dev->name);
8081
8082 if (skb == NULL) {
8083 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8084 stats->rx_dropped++;
8085 return;
8086 }
8087
8088 memcpy(skb_put(skb, size),buf,size);
8089
8090 skb->protocol = hdlc_type_trans(skb, info->netdev);
8091
8092 stats->rx_packets++;
8093 stats->rx_bytes += size;
8094
8095 netif_rx(skb);
8096
8097 info->netdev->last_rx = jiffies;
8098}
8099
8100/**
8101 * called by device driver when adding device instance
8102 * do generic HDLC initialization
8103 *
8104 * info pointer to device instance information
8105 *
8106 * returns 0 if success, otherwise error code
8107 */
8108static int hdlcdev_init(struct mgsl_struct *info)
8109{
8110 int rc;
8111 struct net_device *dev;
8112 hdlc_device *hdlc;
8113
8114 /* allocate and initialize network and HDLC layer objects */
8115
8116 if (!(dev = alloc_hdlcdev(info))) {
8117 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8118 return -ENOMEM;
8119 }
8120
8121 /* for network layer reporting purposes only */
8122 dev->base_addr = info->io_base;
8123 dev->irq = info->irq_level;
8124 dev->dma = info->dma_level;
8125
8126 /* network layer callbacks and settings */
8127 dev->do_ioctl = hdlcdev_ioctl;
8128 dev->open = hdlcdev_open;
8129 dev->stop = hdlcdev_close;
8130 dev->tx_timeout = hdlcdev_tx_timeout;
8131 dev->watchdog_timeo = 10*HZ;
8132 dev->tx_queue_len = 50;
8133
8134 /* generic HDLC layer callbacks and settings */
8135 hdlc = dev_to_hdlc(dev);
8136 hdlc->attach = hdlcdev_attach;
8137 hdlc->xmit = hdlcdev_xmit;
8138
8139 /* register objects with HDLC layer */
8140 if ((rc = register_hdlc_device(dev))) {
8141 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8142 free_netdev(dev);
8143 return rc;
8144 }
8145
8146 info->netdev = dev;
8147 return 0;
8148}
8149
8150/**
8151 * called by device driver when removing device instance
8152 * do generic HDLC cleanup
8153 *
8154 * info pointer to device instance information
8155 */
8156static void hdlcdev_exit(struct mgsl_struct *info)
8157{
8158 unregister_hdlc_device(info->netdev);
8159 free_netdev(info->netdev);
8160 info->netdev = NULL;
8161}
8162
8163#endif /* CONFIG_HDLC */
8164
8165
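/* synclink_init_one()
 *
 * 	PCI probe entry point, called by the PCI subsystem for each
 * 	matching SyncLink PCI adapter. Enables the PCI device, allocates
 * 	a device instance, records the PCI resources (I/O base, IRQ,
 * 	shared memory and LCR register ranges) and adds the device to
 * 	the driver's device list.
 *
 * Arguments:	dev	pointer to PCI device structure
 * 		ent	pointer to matching entry in the PCI device ID table
 *
 * Return Value:	0 if success, otherwise -EIO
 */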
8166static int __devinit synclink_init_one (struct pci_dev *dev,
8167 const struct pci_device_id *ent)
8168{
8169 struct mgsl_struct *info;
8170
8171 if (pci_enable_device(dev)) {
8172 printk("error enabling pci device %p\n", dev);
8173 return -EIO;
8174 }
8175
8176 if (!(info = mgsl_allocate_device())) {
8177 printk("can't allocate device instance data.\n");
8178 return -EIO;
8179 }
8180
8181 /* Copy user configuration info to device instance data */
8182
8183 info->io_base = pci_resource_start(dev, 2);
8184 info->irq_level = dev->irq;
8185 info->phys_memory_base = pci_resource_start(dev, 3);
8186
8187	/* Because ioremap only works on page boundaries we must map
8188 * a larger area than is actually implemented for the LCR
8189 * memory range. We map a full page starting at the page boundary.
8190 */
8191 info->phys_lcr_base = pci_resource_start(dev, 0);
8192 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8193 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8194
8195 info->bus_type = MGSL_BUS_TYPE_PCI;
8196 info->io_addr_size = 8;
8197 info->irq_flags = SA_SHIRQ;
8198
8199 if (dev->device == 0x0210) {
8200 /* Version 1 PCI9030 based universal PCI adapter */
8201 info->misc_ctrl_value = 0x007c4080;
8202 info->hw_version = 1;
8203 } else {
8204 /* Version 0 PCI9050 based 5V PCI adapter
8205 * A PCI9050 bug prevents reading LCR registers if
8206 * LCR base address bit 7 is set. Maintain shadow
8207 * value so we can write to LCR misc control reg.
8208 */
8209 info->misc_ctrl_value = 0x087e4546;
8210 info->hw_version = 0;
8211 }
8212
8213 mgsl_add_device(info);
8214
8215 return 0;
8216}
8217
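/* synclink_remove_one()
 *
 * 	PCI remove entry point. Intentionally left empty; per-adapter
 * 	cleanup for this driver is performed elsewhere (at module unload).
 */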
8218static void __devexit synclink_remove_one (struct pci_dev *dev)
8219{
8220}
8221