/*
 * linux/drivers/char/synclink.c
 *
 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
 *
 * Device driver for Microgate SyncLink ISA and PCI
 * high speed multiprotocol serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
 *
 * Original release 01/11/99
 *
 * This code is released under the GNU General Public License (GPL)
 *
 * This driver is primarily intended for use in synchronous
 * HDLC mode. Asynchronous mode is also provided.
 *
 * When operating in synchronous mode, each call to mgsl_write()
 * contains exactly one complete HDLC frame. Calling mgsl_put_char
 * will start assembling an HDLC frame that will not be sent until
 * mgsl_flush_chars or mgsl_write is called.
 *
 * Synchronous receive data is reported as complete frames. To accomplish
 * this, the TTY flip buffer is bypassed (too small to hold largest
 * frame and may fragment frames) and the line discipline
 * receive entry point is called directly.
 *
 * This driver has been tested with a slightly modified ppp.c driver
 * for synchronous PPP.
 *
 * 2000/02/16
 * Added interface for syncppp.c driver (an alternate synchronous PPP
 * implementation that also supports Cisco HDLC). Each device instance
 * registers as a tty device AND a network device (if dosyncppp option
 * is set for the device). The functionality is determined by which
 * device interface is opened.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
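
/*
 * Illustrative sketch only (not compiled, not part of the driver): a minimal
 * user-space view of the synchronous write model described above, where each
 * write() carries exactly one complete HDLC frame.  The device node name
 * /dev/ttySL0 is an assumption; use whatever node your system creates.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int send_one_frame(const unsigned char *frame, size_t len)
{
	int rc = -1;
	int fd = open("/dev/ttySL0", O_RDWR);	/* assumed device node */

	if (fd >= 0) {
		/* in synchronous HDLC mode, one write() == one frame */
		if (write(fd, frame, len) == (ssize_t)len)
			rc = 0;
		close(fd);
	}
	return rc;
}
#endif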

#if defined(__i386__)
# define BREAKPOINT() asm(" int $3");
#else
# define BREAKPOINT() { }
#endif

#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#define MAX_TOTAL_DEVICES 20

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <linux/netdevice.h>

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <asm/serial.h>

#include <linux/delay.h>
#include <linux/ioctl.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_HDLC_MODULE
#define CONFIG_HDLC 1
#endif

#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0

#include <asm/uaccess.h>

#include "linux/synclink.h"

#define RCLRVALUE 0xffff

static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};
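
/*
 * Illustrative sketch only (not compiled): how a configuration is typically
 * derived from default_params -- copy the defaults, then override individual
 * fields.  The field values shown here are arbitrary examples.
 */
#if 0
	MGSL_PARAMS params = default_params;

	params.loopback    = 1;		/* enable internal loopback */
	params.clock_speed = 64000;	/* 64 kbps reference clock */
	params.data_rate   = 64000;
	info->params = params;		/* 'info' is a struct mgsl_struct * */
#endif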

#define SHARED_MEM_ADDRESS_SIZE 0x40000
#define BUFFERLISTSIZE (PAGE_SIZE)
#define DMABUFFERSIZE (PAGE_SIZE)
#define MAXRXFRAMES 7

typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;
} DMABUFFERENTRY, *DMAPBUFFERENTRY;
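
/*
 * Illustrative sketch only (not compiled): how DMABUFFERENTRY items can be
 * chained through their 'link' fields into a circular list, roughly the
 * layout the buffer-list setup code in this driver produces.
 */
#if 0
static void example_link_buffer_entries(DMABUFFERENTRY *list, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* each entry holds the physical address of the next entry,
		 * wrapping to the first entry to close the ring */
		list[i].link = list[(i + 1) % count].phys_entry;
	}
}
#endif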

/* The queue of BH actions to be performed */

#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

#define IO_PIN_SHUTDOWN_LIMIT 100

#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))

struct _input_signal_events {
	int ri_up;
	int ri_down;
	int dsr_up;
	int dsr_down;
	int dcd_up;
	int dcd_down;
	int cts_up;
	int cts_down;
};

/* transmit holding buffer definitions*/
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int buffer_size;
	unsigned char *buffer;
};


/*
 * Device instance data structure
 */

struct mgsl_struct {
	int magic;
	int flags;
	int count;			/* count of opens */
	int line;
	int hw_version;
	unsigned short close_delay;
	unsigned short closing_wait;	/* time to wait before closing */

	struct mgsl_icount icount;

	struct tty_struct *tty;
	int timeout;
	int x_char;			/* xon/xoff character */
	int blocked_open;		/* # of blocked opens */
	u16 read_status_mask;
	u16 ignore_status_mask;
	unsigned char *xmit_buf;
	int xmit_head;
	int xmit_tail;
	int xmit_cnt;

	wait_queue_head_t open_wait;
	wait_queue_head_t close_wait;

	wait_queue_head_t status_event_wait_q;
	wait_queue_head_t event_wait_q;
	struct timer_list tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct *next_device; /* device list link */

	spinlock_t irq_spinlock;	/* spinlock for synchronizing with ISR */
	struct work_struct task;	/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;

	int bh_running;			/* Protection from multiple */
	int isr_overflow;
	int bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;

	int num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	int rx_enabled;
	int rx_overflow;
	int rx_rcc_underrun;

	int tx_enabled;
	int tx_active;
	u32 idle_mode;

	u16 cmr_value;
	u16 tcsr_value;

	char device_name[25];		/* device instance name */

	unsigned int bus_type;		/* expansion bus type (ISA,EISA,PCI) */
	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	int io_addr_requested;		/* nonzero if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	int irq_requested;		/* nonzero if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	int dma_requested;		/* nonzero if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	int irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int fDiagnosticsmode;		/* Driver in Diagnostic mode? (DIAGS) */

	u32 last_mem_alloc;
	unsigned char *memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	int shared_mem_requested;

	unsigned char *lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	int lcr_mem_requested;

	u32 misc_ctrl_value;
	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	BOOLEAN drop_rts_on_tx_done;

	BOOLEAN loopmode_insert_requested;
	BOOLEAN loopmode_send_done_requested;

	struct _input_signal_events input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	int dosyncppp;
	spinlock_t netlock;

#ifdef CONFIG_HDLC
	struct net_device *netdev;
#endif
};

#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */


#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
#define DATAREG DCPIN + SDPIN	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40

/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
 */

#define CMR	0x02	/* Channel mode Register */
#define CCSR	0x04	/* Channel Command/status Register */
#define CCR	0x06	/* Channel Control Register */
#define PSR	0x08	/* Port status Register */
#define PCR	0x0a	/* Port Control Register */
#define TMDR	0x0c	/* Test mode Data Register */
#define TMCR	0x0e	/* Test mode Control Register */
#define CMCR	0x10	/* Clock mode Control Register */
#define HCR	0x12	/* Hardware Configuration Register */
#define IVR	0x14	/* Interrupt Vector Register */
#define IOCR	0x16	/* Input/Output Control Register */
#define ICR	0x18	/* Interrupt Control Register */
#define DCCR	0x1a	/* Daisy Chain Control Register */
#define MISR	0x1c	/* Misc Interrupt status Register */
#define SICR	0x1e	/* status Interrupt Control Register */
#define RDR	0x20	/* Receive Data Register */
#define RMR	0x22	/* Receive mode Register */
#define RCSR	0x24	/* Receive Command/status Register */
#define RICR	0x26	/* Receive Interrupt Control Register */
#define RSR	0x28	/* Receive Sync Register */
#define RCLR	0x2a	/* Receive count Limit Register */
#define RCCR	0x2c	/* Receive Character count Register */
#define TC0R	0x2e	/* Time Constant 0 Register */
#define TDR	0x30	/* Transmit Data Register */
#define TMR	0x32	/* Transmit mode Register */
#define TCSR	0x34	/* Transmit Command/status Register */
#define TICR	0x36	/* Transmit Interrupt Control Register */
#define TSR	0x38	/* Transmit Sync Register */
#define TCLR	0x3a	/* Transmit count Limit Register */
#define TCCR	0x3c	/* Transmit Character count Register */
#define TC1R	0x3e	/* Time Constant 1 Register */

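/*
 * Illustrative sketch only (not compiled): how a register ordinal above is
 * combined with the adapter base I/O address through CCAR.  This mirrors the
 * RxFIFO read performed later in mgsl_isr_receive_data().
 */
#if 0
static unsigned char example_read_rdr(struct mgsl_struct *info)
{
	/* select RDR (LSB only) via the channel command/address register,
	 * preserving the upper control bits, then read one data byte */
	outw((inw(info->io_base + CCAR) & 0x0780) | (RDR + LSBONLY),
	     info->io_base + CCAR);
	return inb(info->io_base + CCAR);
}
#endif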

/*
 * MACRO DEFINITIONS FOR DMA REGISTERS
 */

#define DCR	0x06	/* DMA Control Register (shared) */
#define DACR	0x08	/* DMA Array count Register (shared) */
#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */
#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */

#define TDMR	0x02	/* Transmit DMA mode Register */
#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
#define TBCR	0x2a	/* Transmit Byte count Register */
#define TARL	0x2c	/* Transmit Address Register (low) */
#define TARU	0x2e	/* Transmit Address Register (high) */
#define NTBCR	0x3a	/* Next Transmit Byte count Register */
#define NTARL	0x3c	/* Next Transmit Address Register (low) */
#define NTARU	0x3e	/* Next Transmit Address Register (high) */

#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
#define RBCR	0xaa	/* Receive Byte count Register */
#define RARL	0xac	/* Receive Address Register (low) */
#define RARU	0xae	/* Receive Address Register (high) */
#define NRBCR	0xba	/* Next Receive Byte count Register */
#define NRARL	0xbc	/* Next Receive Address Register (low) */
#define NRARU	0xbe	/* Next Receive Address Register (high) */


/*
 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 */

#define MODEMSTATUS_DTR 0x80
#define MODEMSTATUS_DSR 0x40
#define MODEMSTATUS_RTS 0x20
#define MODEMSTATUS_CTS 0x10
#define MODEMSTATUS_RI  0x04
#define MODEMSTATUS_DCD 0x01


/*
 * Channel Command/Address Register (CCAR) Command Codes
 */

#define RTCmd_Null			0x0000
#define RTCmd_ResetHighestIus		0x1000
#define RTCmd_TriggerChannelLoadDma	0x2000
#define RTCmd_TriggerRxDma		0x2800
#define RTCmd_TriggerTxDma		0x3000
#define RTCmd_TriggerRxAndTxDma		0x3800
#define RTCmd_PurgeRxFifo		0x4800
#define RTCmd_PurgeTxFifo		0x5000
#define RTCmd_PurgeRxAndTxFifo		0x5800
#define RTCmd_LoadRcc			0x6800
#define RTCmd_LoadTcc			0x7000
#define RTCmd_LoadRccAndTcc		0x7800
#define RTCmd_LoadTC0			0x8800
#define RTCmd_LoadTC1			0x9000
#define RTCmd_LoadTC0AndTC1		0x9800
#define RTCmd_SerialDataLSBFirst	0xa000
#define RTCmd_SerialDataMSBFirst	0xa800
#define RTCmd_SelectBigEndian		0xb000
#define RTCmd_SelectLittleEndian	0xb800


/*
 * DMA Command/Address Register (DCAR) Command Codes
 */

#define DmaCmd_Null			0x0000
#define DmaCmd_ResetTxChannel		0x1000
#define DmaCmd_ResetRxChannel		0x1200
#define DmaCmd_StartTxChannel		0x2000
#define DmaCmd_StartRxChannel		0x2200
#define DmaCmd_ContinueTxChannel	0x3000
#define DmaCmd_ContinueRxChannel	0x3200
#define DmaCmd_PauseTxChannel		0x4000
#define DmaCmd_PauseRxChannel		0x4200
#define DmaCmd_AbortTxChannel		0x5000
#define DmaCmd_AbortRxChannel		0x5200
#define DmaCmd_InitTxChannel		0x7000
#define DmaCmd_InitRxChannel		0x7200
#define DmaCmd_ResetHighestDmaIus	0x8000
#define DmaCmd_ResetAllChannels		0x9000
#define DmaCmd_StartAllChannels		0xa000
#define DmaCmd_ContinueAllChannels	0xb000
#define DmaCmd_PauseAllChannels		0xc000
#define DmaCmd_AbortAllChannels		0xd000
#define DmaCmd_InitAllChannels		0xf000

#define TCmd_Null			0x0000
#define TCmd_ClearTxCRC			0x2000
#define TCmd_SelectTicrTtsaData		0x4000
#define TCmd_SelectTicrTxFifostatus	0x5000
#define TCmd_SelectTicrIntLevel		0x6000
#define TCmd_SelectTicrdma_level	0x7000
#define TCmd_SendFrame			0x8000
#define TCmd_SendAbort			0x9000
#define TCmd_EnableDleInsertion		0xc000
#define TCmd_DisableDleInsertion	0xd000
#define TCmd_ClearEofEom		0xe000
#define TCmd_SetEofEom			0xf000

#define RCmd_Null			0x0000
#define RCmd_ClearRxCRC			0x2000
#define RCmd_EnterHuntmode		0x3000
#define RCmd_SelectRicrRtsaData		0x4000
#define RCmd_SelectRicrRxFifostatus	0x5000
#define RCmd_SelectRicrIntLevel		0x6000
#define RCmd_SelectRicrdma_level	0x7000

/*
 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 */

#define RECEIVE_STATUS		BIT5
#define RECEIVE_DATA		BIT4
#define TRANSMIT_STATUS		BIT3
#define TRANSMIT_DATA		BIT2
#define IO_PIN			BIT1
#define MISC			BIT0


/*
 * Receive status Bits in Receive Command/status Register RCSR
 */

#define RXSTATUS_SHORT_FRAME		BIT8
#define RXSTATUS_CODE_VIOLATION		BIT8
#define RXSTATUS_EXITED_HUNT		BIT7
#define RXSTATUS_IDLE_RECEIVED		BIT6
#define RXSTATUS_BREAK_RECEIVED		BIT5
#define RXSTATUS_ABORT_RECEIVED		BIT5
#define RXSTATUS_RXBOUND		BIT4
#define RXSTATUS_CRC_ERROR		BIT3
#define RXSTATUS_FRAMING_ERROR		BIT3
#define RXSTATUS_ABORT			BIT2
#define RXSTATUS_PARITY_ERROR		BIT2
#define RXSTATUS_OVERRUN		BIT1
#define RXSTATUS_DATA_AVAILABLE		BIT0
#define RXSTATUS_ALL			0x01f6
#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )

/*
 * Values for setting transmit idle mode in
 * Transmit Control/status Register (TCSR)
 */
#define IDLEMODE_FLAGS			0x0000
#define IDLEMODE_ALT_ONE_ZERO		0x0100
#define IDLEMODE_ZERO			0x0200
#define IDLEMODE_ONE			0x0300
#define IDLEMODE_ALT_MARK_SPACE		0x0500
#define IDLEMODE_SPACE			0x0600
#define IDLEMODE_MARK			0x0700
#define IDLEMODE_MASK			0x0700

/*
 * IUSC revision identifiers
 */
#define IUSC_SL1660		0x4d44
#define IUSC_PRE_SL1660		0x4553

/*
 * Transmit status Bits in Transmit Command/status Register (TCSR)
 */

#define TCSR_PRESERVE		0x0F00

#define TCSR_UNDERWAIT		BIT11
#define TXSTATUS_PREAMBLE_SENT	BIT7
#define TXSTATUS_IDLE_SENT	BIT6
#define TXSTATUS_ABORT_SENT	BIT5
#define TXSTATUS_EOF_SENT	BIT4
#define TXSTATUS_EOM_SENT	BIT4
#define TXSTATUS_CRC_SENT	BIT3
#define TXSTATUS_ALL_SENT	BIT2
#define TXSTATUS_UNDERRUN	BIT1
#define TXSTATUS_FIFO_EMPTY	BIT0
#define TXSTATUS_ALL		0x00fa
#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )


#define MISCSTATUS_RXC_LATCHED		BIT15
#define MISCSTATUS_RXC			BIT14
#define MISCSTATUS_TXC_LATCHED		BIT13
#define MISCSTATUS_TXC			BIT12
#define MISCSTATUS_RI_LATCHED		BIT11
#define MISCSTATUS_RI			BIT10
#define MISCSTATUS_DSR_LATCHED		BIT9
#define MISCSTATUS_DSR			BIT8
#define MISCSTATUS_DCD_LATCHED		BIT7
#define MISCSTATUS_DCD			BIT6
#define MISCSTATUS_CTS_LATCHED		BIT5
#define MISCSTATUS_CTS			BIT4
#define MISCSTATUS_RCC_UNDERRUN		BIT3
#define MISCSTATUS_DPLL_NO_SYNC		BIT2
#define MISCSTATUS_BRG1_ZERO		BIT1
#define MISCSTATUS_BRG0_ZERO		BIT0

#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))

#define SICR_RXC_ACTIVE		BIT15
#define SICR_RXC_INACTIVE	BIT14
#define SICR_RXC		(BIT15+BIT14)
#define SICR_TXC_ACTIVE		BIT13
#define SICR_TXC_INACTIVE	BIT12
#define SICR_TXC		(BIT13+BIT12)
#define SICR_RI_ACTIVE		BIT11
#define SICR_RI_INACTIVE	BIT10
#define SICR_RI			(BIT11+BIT10)
#define SICR_DSR_ACTIVE		BIT9
#define SICR_DSR_INACTIVE	BIT8
#define SICR_DSR		(BIT9+BIT8)
#define SICR_DCD_ACTIVE		BIT7
#define SICR_DCD_INACTIVE	BIT6
#define SICR_DCD		(BIT7+BIT6)
#define SICR_CTS_ACTIVE		BIT5
#define SICR_CTS_INACTIVE	BIT4
#define SICR_CTS		(BIT5+BIT4)
#define SICR_RCC_UNDERFLOW	BIT3
#define SICR_DPLL_NO_SYNC	BIT2
#define SICR_BRG1_ZERO		BIT1
#define SICR_BRG0_ZERO		BIT0

void usc_DisableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );

#define usc_EnableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )

#define usc_DisableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )

#define usc_EnableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )

#define usc_DisableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )

#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )

/*
 * Transmit status Bits in Transmit Control status Register (TCSR)
 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 */

#define TXSTATUS_PREAMBLE_SENT	BIT7
#define TXSTATUS_IDLE_SENT	BIT6
#define TXSTATUS_ABORT_SENT	BIT5
#define TXSTATUS_EOF		BIT4
#define TXSTATUS_CRC_SENT	BIT3
#define TXSTATUS_ALL_SENT	BIT2
#define TXSTATUS_UNDERRUN	BIT1
#define TXSTATUS_FIFO_EMPTY	BIT0

#define DICR_MASTER		BIT15
#define DICR_TRANSMIT		BIT0
#define DICR_RECEIVE		BIT1

#define usc_EnableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )

#define usc_DisableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )

#define usc_EnableStatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )

#define usc_DisablestatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )

/* Transmit status Bits in Transmit Control status Register (TCSR) */
/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */


#define DISABLE_UNCONDITIONAL	0
#define DISABLE_END_OF_FRAME	1
#define ENABLE_UNCONDITIONAL	2
#define ENABLE_AUTO_CTS		3
#define ENABLE_AUTO_DCD		3
#define usc_EnableTransmitter(a,b) \
	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
#define usc_EnableReceiver(a,b) \
	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
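
/*
 * Illustrative sketch only (not compiled): typical use of the enable helper
 * macros defined above.  The particular mask and mode values are examples;
 * the real choices are made by the mode-setup routines later in this driver.
 */
#if 0
	usc_EnableInterrupts(info, TRANSMIT_STATUS + TRANSMIT_DATA);
	usc_EnableReceiver(info, ENABLE_UNCONDITIONAL);
	usc_EnableTransmitter(info, ENABLE_UNCONDITIONAL);
	usc_EnableMasterIrqBit(info);
#endif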

static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );

static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
void usc_TCmd( struct mgsl_struct *info, u16 Cmd );

#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))

#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))

static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );

static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );

static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );

static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );

static void usc_reset( struct mgsl_struct *info );

static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );

static void usc_loopback_frame( struct mgsl_struct *info );

static void mgsl_tx_timeout(unsigned long context);


static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#ifdef CONFIG_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 */

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )
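
/*
 * Worked example (arbitrary timing values, illustration only):
 * BUS_DESCRIPTOR(1,0,0,0,0,0,0,0) evaluates to 0x00400020 + (1 << 30)
 * = 0x40400020.  The values actually written to the adapter are chosen
 * by the setup code elsewhere in this driver.
 */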

static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);

/*
 * DMA buffer manipulation functions.
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static int  mgsl_get_rx_frame( struct mgsl_struct *info );
static int  mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);

/*
 * DMA and Shared Memory buffer allocation and formatting
 */
static int  mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int  mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int  mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static int load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);

/*
 * Bottom half interrupt handlers
 */
static void mgsl_bh_handler(void* Context);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);

/*
 * Interrupt handler routines and dispatch table.
 */
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );

typedef void (*isr_dispatch_func)(struct mgsl_struct *);

static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};

/*
 * ioctl call handlers
 */
static int tiocmget(struct tty_struct *tty, struct file *file);
static int tiocmset(struct tty_struct *tty, struct file *file,
		    unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
	__user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS  __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS  __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );

/* set non-zero on successful registration with PCI subsystem */
static int pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static int break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int dosyncppp[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(dosyncppp, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

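/*
 * Example of supplying the options above at module load time (the values
 * shown are hypothetical and for illustration only):
 *
 *   modprobe synclink io=0x280 irq=10 dma=7 debug_level=1 maxframe=4096
 */
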
static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

static struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= __devexit_p(synclink_remove_one),
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgsl_get_text_ptr(void)
{
	return mgsl_get_text_ptr;
}

/*
 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
 * and some other program tries to do a serial write at the same time.
 * Since the lock will only come under contention when the system is
 * swapping and available memory is low, it makes sense to share one
 * buffer across all the serial ioports, since it significantly saves
 * memory if large numbers of serial ports are open.
 */
static unsigned char *tmp_buf;
static DECLARE_MUTEX(tmp_buf_sem);
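
/*
 * Illustrative sketch only (not compiled): the tmp_buf locking pattern
 * described above, as used conceptually by mgsl_write().  'err', 'user_buf'
 * and 'count' are placeholder names.
 */
#if 0
	down(&tmp_buf_sem);
	COPY_FROM_USER(err, tmp_buf, user_buf, count);
	if (!err) {
		/* ... copy from tmp_buf into the transmit buffers ... */
	}
	up(&tmp_buf_sem);
#endif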

static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */

static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->receive_buf)
			ld->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running   = 0;
		info->bh_requested = 0;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(void* Context)
{
	struct mgsl_struct *info = (struct mgsl_struct*)Context;
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = 1;

	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

static void mgsl_bh_receive(struct mgsl_struct *info)
{
	int (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty) {
		tty_wakeup(tty);
		wake_up_interruptible(&tty->write_wait);
	}

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = FALSE;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 *	Service a transmit status interrupt
 *	HDLC mode: end of transmit frame
 *	Async mode: all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave  */
		/* the TxFifo with data from the aborted frame  */
		/* so purge the TxFifo. Also shutdown the DMA   */
		/* channel in case there is data remaining in   */
		/* the DMA buffer                               */
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	info->tx_active = 0;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = 0;
	}

#ifdef CONFIG_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->tty->stopped || info->tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

} /* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 *	Service an Input/Output pin interrupt. The type of
 *	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#ifdef CONFIG_HDLC
			if (info->netcount)
				hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		if ( (info->flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->tty)
					tty_hangup(info->tty);
			}
		}

		if ( (info->flags & ASYNC_CTS_FLOW) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->tty)
						info->tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->tty)
						info->tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = 1;
	}

} /* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 *	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	if (info->tty->stopped || info->tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = 0;

	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;

} /* end of mgsl_isr_transmit_data() */

/* mgsl_isr_receive_data()
 *
 *	Service a receive data interrupt. This occurs
 *	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	unsigned char DataByte;
	struct tty_struct *tty = info->tty;
	struct mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO */

	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
		      info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		if (tty->flip.count >= TTY_FLIPBUF_SIZE)
			continue;

		*tty->flip.char_buf_ptr = DataByte;
		icount->rx++;

		*tty->flip.flag_buf_ptr = 0;
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			if (status & RXSTATUS_BREAK_RECEIVED) {
				*tty->flip.flag_buf_ptr = TTY_BREAK;
				if (info->flags & ASYNC_SAK)
					do_SAK(tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				*tty->flip.flag_buf_ptr = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				*tty->flip.flag_buf_ptr = TTY_FRAME;
			if (status & RXSTATUS_OVERRUN) {
				/* Overrun is special, since it's
				 * reported immediately, and doesn't
				 * affect the current character
				 */
				if (tty->flip.count < TTY_FLIPBUF_SIZE) {
					tty->flip.count++;
					tty->flip.flag_buf_ptr++;
					tty->flip.char_buf_ptr++;
					*tty->flip.flag_buf_ptr = TTY_OVERRUN;
				}
			}
		}	/* end of if (error) */

		tty->flip.flag_buf_ptr++;
		tty->flip.char_buf_ptr++;
		tty->flip.count++;
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
			__FILE__,__LINE__,tty->flip.count);
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	if ( tty->flip.count )
		tty_flip_buffer_push(tty);
}

/* mgsl_isr_misc()
 *
 *	Service a miscellaneous interrupt source.
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = 1;
	}

	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );

} /* end of mgsl_isr_misc() */

/* mgsl_isr_null()
 *
 *	Services undefined interrupt vectors from the
 *	USC. (hence this function SHOULD never be called)
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_null( struct mgsl_struct *info )
{

} /* end of mgsl_isr_null() */

/* mgsl_isr_receive_dma()
 *
 *	Service a receive DMA channel interrupt.
 *	For this driver there are two sources of receive DMA interrupts
 *	as identified in the Receive DMA mode Register (RDMR):
 *
 *	BIT3	EOA/EOL		End of List, all receive buffers in receive
 *				buffer list have been filled (no more free buffers
 *				available). The DMA controller has shut down.
 *
 *	BIT2	EOB		End of Buffer. This interrupt occurs when a receive
 *				DMA buffer is terminated in response to completion
 *				of a good frame or a frame with errors. The status
 *				of the frame is stored in the buffer entry in the
 *				list of receive buffer entries.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	info->pending_bh |= BH_RECEIVE;

	if ( status & BIT3 ) {
		info->rx_overflow = 1;
		info->icount.buf_overrun++;
	}

} /* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *	BIT2	EOB	End of Buffer. This interrupt occurs when a
 *			transmit DMA buffer has been emptied.
 *
 *	The driver maintains enough transmit DMA buffers to hold at least
 *	one max frame size transmit frame. When operating in a buffered
 *	transmit mode, there may be enough transmit DMA buffers to hold at
 *	least two or more max frame size frames. On an EOB condition,
 *	determine if there are any queued transmit buffers and copy into
 *	transmit DMA buffers if we have room.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 * try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

} /* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 *	Interrupt service routine entry point.
 *
 * Arguments:
 *
 *	irq		interrupt number that caused interrupt
 *	dev_id		device ID supplied during interrupt registration
 *	regs		interrupted processor context
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct mgsl_struct * info;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__,__LINE__,irq);

	info = (struct mgsl_struct *)dev_id;
	if (!info)
		return IRQ_NONE;

	spin_lock(&info->irq_spinlock);

	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		if ( info->isr_overflow ) {
			printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
				__FILE__,__LINE__,info->device_name, irq);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = 1;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__,__LINE__,irq);
	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */

/* startup()
 *
 *	Initialize and start device.
1794 *
1795 * Arguments: info pointer to device instance data
1796 * Return Value: 0 if success, otherwise error code
1797 */
1798static int startup(struct mgsl_struct * info)
1799{
1800 int retval = 0;
1801
1802 if ( debug_level >= DEBUG_LEVEL_INFO )
1803 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1804
1805 if (info->flags & ASYNC_INITIALIZED)
1806 return 0;
1807
1808 if (!info->xmit_buf) {
1809 /* allocate a page of memory for a transmit buffer */
1810 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1811 if (!info->xmit_buf) {
1812 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1813 __FILE__,__LINE__,info->device_name);
1814 return -ENOMEM;
1815 }
1816 }
1817
1818 info->pending_bh = 0;
1819
1820	memset(&info->icount, 0, sizeof(info->icount));
1821
1822	init_timer(&info->tx_timer);
1823 info->tx_timer.data = (unsigned long)info;
1824 info->tx_timer.function = mgsl_tx_timeout;
1825
1826 /* Allocate and claim adapter resources */
1827 retval = mgsl_claim_resources(info);
1828
1829 /* perform existence check and diagnostics */
1830 if ( !retval )
1831 retval = mgsl_adapter_test(info);
1832
1833 if ( retval ) {
1834 if (capable(CAP_SYS_ADMIN) && info->tty)
1835 set_bit(TTY_IO_ERROR, &info->tty->flags);
1836 mgsl_release_resources(info);
1837 return retval;
1838 }
1839
1840 /* program hardware for current parameters */
1841 mgsl_change_params(info);
1842
1843 if (info->tty)
1844 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1845
1846 info->flags |= ASYNC_INITIALIZED;
1847
1848 return 0;
1849
1850} /* end of startup() */
1851
1852/* shutdown()
1853 *
1854 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1855 *
1856 * Arguments: info pointer to device instance data
1857 * Return Value: None
1858 */
1859static void shutdown(struct mgsl_struct * info)
1860{
1861 unsigned long flags;
1862
1863 if (!(info->flags & ASYNC_INITIALIZED))
1864 return;
1865
1866 if (debug_level >= DEBUG_LEVEL_INFO)
1867 printk("%s(%d):mgsl_shutdown(%s)\n",
1868 __FILE__,__LINE__, info->device_name );
1869
1870 /* clear status wait queue because status changes */
1871 /* can't happen after shutting down the hardware */
1872 wake_up_interruptible(&info->status_event_wait_q);
1873 wake_up_interruptible(&info->event_wait_q);
1874
1875 del_timer(&info->tx_timer);
1876
1877 if (info->xmit_buf) {
1878 free_page((unsigned long) info->xmit_buf);
1879 info->xmit_buf = NULL;
1880 }
1881
1882 spin_lock_irqsave(&info->irq_spinlock,flags);
1883 usc_DisableMasterIrqBit(info);
1884 usc_stop_receiver(info);
1885 usc_stop_transmitter(info);
1886 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1887 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1888 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1889
1890 /* Disable DMAEN (Port 7, Bit 14) */
1891 /* This disconnects the DMA request signal from the ISA bus */
1892 /* on the ISA adapter. This has no effect for the PCI adapter */
1893 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1894
1895 /* Disable INTEN (Port 6, Bit12) */
1896 /* This disconnects the IRQ request signal to the ISA bus */
1897 /* on the ISA adapter. This has no effect for the PCI adapter */
1898 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1899
1900 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1901 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1902 usc_set_serial_signals(info);
1903 }
1904
1905 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1906
1907 mgsl_release_resources(info);
1908
1909 if (info->tty)
1910 set_bit(TTY_IO_ERROR, &info->tty->flags);
1911
1912 info->flags &= ~ASYNC_INITIALIZED;
1913
1914} /* end of shutdown() */
1915
1916static void mgsl_program_hw(struct mgsl_struct *info)
1917{
1918 unsigned long flags;
1919
1920 spin_lock_irqsave(&info->irq_spinlock,flags);
1921
1922 usc_stop_receiver(info);
1923 usc_stop_transmitter(info);
1924 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1925
1926 if (info->params.mode == MGSL_MODE_HDLC ||
1927 info->params.mode == MGSL_MODE_RAW ||
1928 info->netcount)
1929 usc_set_sync_mode(info);
1930 else
1931 usc_set_async_mode(info);
1932
1933 usc_set_serial_signals(info);
1934
1935 info->dcd_chkcount = 0;
1936 info->cts_chkcount = 0;
1937 info->ri_chkcount = 0;
1938 info->dsr_chkcount = 0;
1939
1940 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1941 usc_EnableInterrupts(info, IO_PIN);
1942 usc_get_serial_signals(info);
1943
1944 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1945 usc_start_receiver(info);
1946
1947 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1948}
1949
1950/* Reconfigure adapter based on new parameters
1951 */
1952static void mgsl_change_params(struct mgsl_struct *info)
1953{
1954 unsigned cflag;
1955 int bits_per_char;
1956
1957 if (!info->tty || !info->tty->termios)
1958 return;
1959
1960 if (debug_level >= DEBUG_LEVEL_INFO)
1961 printk("%s(%d):mgsl_change_params(%s)\n",
1962 __FILE__,__LINE__, info->device_name );
1963
1964 cflag = info->tty->termios->c_cflag;
1965
1966 /* if B0 rate (hangup) specified then negate DTR and RTS */
1967 /* otherwise assert DTR and RTS */
1968 if (cflag & CBAUD)
1969 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1970 else
1971 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1972
1973 /* byte size and parity */
1974
1975 switch (cflag & CSIZE) {
1976 case CS5: info->params.data_bits = 5; break;
1977 case CS6: info->params.data_bits = 6; break;
1978 case CS7: info->params.data_bits = 7; break;
1979 case CS8: info->params.data_bits = 8; break;
1980 /* Never happens, but GCC is too dumb to figure it out */
1981 default: info->params.data_bits = 7; break;
1982 }
1983
1984 if (cflag & CSTOPB)
1985 info->params.stop_bits = 2;
1986 else
1987 info->params.stop_bits = 1;
1988
1989 info->params.parity = ASYNC_PARITY_NONE;
1990 if (cflag & PARENB) {
1991 if (cflag & PARODD)
1992 info->params.parity = ASYNC_PARITY_ODD;
1993 else
1994 info->params.parity = ASYNC_PARITY_EVEN;
1995#ifdef CMSPAR
1996 if (cflag & CMSPAR)
1997 info->params.parity = ASYNC_PARITY_SPACE;
1998#endif
1999 }
2000
2001 /* calculate number of jiffies to transmit a full
2002 * FIFO (32 bytes) at specified data rate
2003 */
2004 bits_per_char = info->params.data_bits +
2005 info->params.stop_bits + 1;
2006
2007 /* if port data rate is set to 460800 or less then
2008 * allow tty settings to override, otherwise keep the
2009 * current data rate.
2010 */
2011 if (info->params.data_rate <= 460800)
2012 info->params.data_rate = tty_get_baud_rate(info->tty);
2013
2014 if ( info->params.data_rate ) {
2015 info->timeout = (32*HZ*bits_per_char) /
2016 info->params.data_rate;
2017 }
2018 info->timeout += HZ/50; /* Add .02 seconds of slop */
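
	/* Worked example (illustrative only, values assumed): at 9600 bps
	 * with 8 data bits and 1 stop bit, bits_per_char = 8 + 1 + 1 = 10,
	 * so a full 32 byte FIFO is 32 * 10 = 320 bit times, or about
	 * 320 / 9600 = 33 ms.  info->timeout is then roughly HZ/30 jiffies,
	 * plus the HZ/50 (20 ms) of slop added above, around 53 ms total.
	 */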
2019
2020 if (cflag & CRTSCTS)
2021 info->flags |= ASYNC_CTS_FLOW;
2022 else
2023 info->flags &= ~ASYNC_CTS_FLOW;
2024
2025 if (cflag & CLOCAL)
2026 info->flags &= ~ASYNC_CHECK_CD;
2027 else
2028 info->flags |= ASYNC_CHECK_CD;
2029
2030 /* process tty input control flags */
2031
2032 info->read_status_mask = RXSTATUS_OVERRUN;
2033 if (I_INPCK(info->tty))
2034 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2035 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2036 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2037
2038 if (I_IGNPAR(info->tty))
2039 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2040 if (I_IGNBRK(info->tty)) {
2041 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2042 /* If ignoring parity and break indicators, ignore
2043 * overruns too. (For real raw support).
2044 */
2045 if (I_IGNPAR(info->tty))
2046 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2047 }
2048
2049 mgsl_program_hw(info);
2050
2051} /* end of mgsl_change_params() */
2052
2053/* mgsl_put_char()
2054 *
2055 * Add a character to the transmit buffer.
2056 *
2057 * Arguments: tty pointer to tty information structure
2058 * ch character to add to transmit buffer
2059 *
2060 * Return Value: None
2061 */
2062static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2063{
2064 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2065 unsigned long flags;
2066
2067 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2068 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2069 __FILE__,__LINE__,ch,info->device_name);
2070 }
2071
2072 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2073 return;
2074
2075 if (!tty || !info->xmit_buf)
2076 return;
2077
2078 spin_lock_irqsave(&info->irq_spinlock,flags);
2079
2080 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2081
2082 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2083 info->xmit_buf[info->xmit_head++] = ch;
2084 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2085 info->xmit_cnt++;
2086 }
2087 }
2088
2089 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2090
2091} /* end of mgsl_put_char() */
2092
2093/* mgsl_flush_chars()
2094 *
2095 * Enable transmitter so remaining characters in the
2096 * transmit buffer are sent.
2097 *
2098 * Arguments: tty pointer to tty information structure
2099 * Return Value: None
2100 */
2101static void mgsl_flush_chars(struct tty_struct *tty)
2102{
2103 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2104 unsigned long flags;
2105
2106 if ( debug_level >= DEBUG_LEVEL_INFO )
2107 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2108 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2109
2110 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2111 return;
2112
2113 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2114 !info->xmit_buf)
2115 return;
2116
2117 if ( debug_level >= DEBUG_LEVEL_INFO )
2118 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2119 __FILE__,__LINE__,info->device_name );
2120
2121 spin_lock_irqsave(&info->irq_spinlock,flags);
2122
2123 if (!info->tx_active) {
2124 if ( (info->params.mode == MGSL_MODE_HDLC ||
2125 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2126 /* operating in synchronous (frame oriented) mode */
2127 /* copy data from circular xmit_buf to */
2128 /* transmit DMA buffer. */
2129 mgsl_load_tx_dma_buffer(info,
2130 info->xmit_buf,info->xmit_cnt);
2131 }
2132 usc_start_transmitter(info);
2133 }
2134
2135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2136
2137} /* end of mgsl_flush_chars() */
2138
2139/* mgsl_write()
2140 *
2141 * Send a block of data
2142 *
2143 * Arguments:
2144 *
2145 * tty pointer to tty information structure
2146 * buf pointer to buffer containing send data
2147 * count size of send data in bytes
2148 *
2149 * Return Value: number of characters written
2150 */
2151static int mgsl_write(struct tty_struct * tty,
2152 const unsigned char *buf, int count)
2153{
2154 int c, ret = 0;
2155 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2156 unsigned long flags;
2157
2158 if ( debug_level >= DEBUG_LEVEL_INFO )
2159 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2160 __FILE__,__LINE__,info->device_name,count);
2161
2162 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2163 goto cleanup;
2164
2165 if (!tty || !info->xmit_buf || !tmp_buf)
2166 goto cleanup;
2167
2168 if ( info->params.mode == MGSL_MODE_HDLC ||
2169 info->params.mode == MGSL_MODE_RAW ) {
2170		/* operating in synchronous (frame oriented) mode */
2172 if (info->tx_active) {
2173
2174 if ( info->params.mode == MGSL_MODE_HDLC ) {
2175 ret = 0;
2176 goto cleanup;
2177 }
2178 /* transmitter is actively sending data -
2179 * if we have multiple transmit dma and
2180 * holding buffers, attempt to queue this
2181 * frame for transmission at a later time.
2182 */
2183 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2184 /* no tx holding buffers available */
2185 ret = 0;
2186 goto cleanup;
2187 }
2188
2189 /* queue transmit frame request */
2190 ret = count;
2191 save_tx_buffer_request(info,buf,count);
2192
2193 /* if we have sufficient tx dma buffers,
2194 * load the next buffered tx request
2195 */
2196 spin_lock_irqsave(&info->irq_spinlock,flags);
2197 load_next_tx_holding_buffer(info);
2198 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2199 goto cleanup;
2200 }
2201
2202 /* if operating in HDLC LoopMode and the adapter */
2203 /* has yet to be inserted into the loop, we can't */
2204 /* transmit */
2205
2206 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2207 !usc_loopmode_active(info) )
2208 {
2209 ret = 0;
2210 goto cleanup;
2211 }
2212
2213 if ( info->xmit_cnt ) {
2214			/* Send data accumulated from mgsl_put_char() calls */
2215			/* as a frame and wait before accepting more data. */
2216 ret = 0;
2217
2218 /* copy data from circular xmit_buf to */
2219 /* transmit DMA buffer. */
2220 mgsl_load_tx_dma_buffer(info,
2221 info->xmit_buf,info->xmit_cnt);
2222 if ( debug_level >= DEBUG_LEVEL_INFO )
2223 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2224 __FILE__,__LINE__,info->device_name);
2225 } else {
2226 if ( debug_level >= DEBUG_LEVEL_INFO )
2227 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2228 __FILE__,__LINE__,info->device_name);
2229 ret = count;
2230 info->xmit_cnt = count;
2231 mgsl_load_tx_dma_buffer(info,buf,count);
2232 }
2233 } else {
2234 while (1) {
2235 spin_lock_irqsave(&info->irq_spinlock,flags);
2236 c = min_t(int, count,
2237 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2238 SERIAL_XMIT_SIZE - info->xmit_head));
2239 if (c <= 0) {
2240 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2241 break;
2242 }
2243 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2244 info->xmit_head = ((info->xmit_head + c) &
2245 (SERIAL_XMIT_SIZE-1));
2246 info->xmit_cnt += c;
2247 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2248 buf += c;
2249 count -= c;
2250 ret += c;
2251 }
2252 }
2253
2254 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2255 spin_lock_irqsave(&info->irq_spinlock,flags);
2256 if (!info->tx_active)
2257 usc_start_transmitter(info);
2258 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2259 }
2260cleanup:
2261 if ( debug_level >= DEBUG_LEVEL_INFO )
2262 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2263 __FILE__,__LINE__,info->device_name,ret);
2264
2265 return ret;
2266
2267} /* end of mgsl_write() */
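
/*
 * Illustrative user-space sketch (not part of the driver): when the port
 * is configured for MGSL_MODE_HDLC or MGSL_MODE_RAW, mgsl_write() loads
 * the caller's buffer into the transmit DMA buffers as one frame, so a
 * single write() per frame is the expected usage.  The node name is
 * assumed, and the sketch presumes the line discipline passes the data
 * through unmodified.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int send_one_frame(const unsigned char *frame, int len)
 *	{
 *		int fd = open("/dev/ttySL0", O_RDWR);	// assumed node name
 *		int rc = (fd < 0) ? -1 : (int)write(fd, frame, len);
 *		if (fd >= 0)
 *			close(fd);
 *		return rc;	// bytes accepted, or -1 on error
 *	}
 */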
2268
2269/* mgsl_write_room()
2270 *
2271 * Return the count of free bytes in transmit buffer
2272 *
2273 * Arguments: tty pointer to tty info structure
2274 * Return Value:	count of free bytes in transmit buffer
2275 */
2276static int mgsl_write_room(struct tty_struct *tty)
2277{
2278 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2279 int ret;
2280
2281 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2282 return 0;
2283 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2284 if (ret < 0)
2285 ret = 0;
2286
2287 if (debug_level >= DEBUG_LEVEL_INFO)
2288 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2289 __FILE__,__LINE__, info->device_name,ret );
2290
2291 if ( info->params.mode == MGSL_MODE_HDLC ||
2292 info->params.mode == MGSL_MODE_RAW ) {
2293 /* operating in synchronous (frame oriented) mode */
2294 if ( info->tx_active )
2295 return 0;
2296 else
2297 return HDLC_MAX_FRAME_SIZE;
2298 }
2299
2300 return ret;
2301
2302} /* end of mgsl_write_room() */
2303
2304/* mgsl_chars_in_buffer()
2305 *
2306 * Return the count of bytes in transmit buffer
2307 *
2308 * Arguments: tty pointer to tty info structure
2309 * Return Value:	count of bytes in transmit buffer
2310 */
2311static int mgsl_chars_in_buffer(struct tty_struct *tty)
2312{
2313 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2314
2315 if (debug_level >= DEBUG_LEVEL_INFO)
2316 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2317 __FILE__,__LINE__, info->device_name );
2318
2319 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2320 return 0;
2321
2322 if (debug_level >= DEBUG_LEVEL_INFO)
2323 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2324 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2325
2326 if ( info->params.mode == MGSL_MODE_HDLC ||
2327 info->params.mode == MGSL_MODE_RAW ) {
2328 /* operating in synchronous (frame oriented) mode */
2329 if ( info->tx_active )
2330 return info->max_frame_size;
2331 else
2332 return 0;
2333 }
2334
2335 return info->xmit_cnt;
2336} /* end of mgsl_chars_in_buffer() */
2337
2338/* mgsl_flush_buffer()
2339 *
2340 * Discard all data in the send buffer
2341 *
2342 * Arguments: tty pointer to tty info structure
2343 * Return Value: None
2344 */
2345static void mgsl_flush_buffer(struct tty_struct *tty)
2346{
2347 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2348 unsigned long flags;
2349
2350 if (debug_level >= DEBUG_LEVEL_INFO)
2351 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2352 __FILE__,__LINE__, info->device_name );
2353
2354 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2355 return;
2356
2357 spin_lock_irqsave(&info->irq_spinlock,flags);
2358 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2359 del_timer(&info->tx_timer);
2360 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2361
2362 wake_up_interruptible(&tty->write_wait);
2363 tty_wakeup(tty);
2364}
2365
2366/* mgsl_send_xchar()
2367 *
2368 * Send a high-priority XON/XOFF character
2369 *
2370 * Arguments: tty pointer to tty info structure
2371 * ch character to send
2372 * Return Value: None
2373 */
2374static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2375{
2376 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2377 unsigned long flags;
2378
2379 if (debug_level >= DEBUG_LEVEL_INFO)
2380 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2381 __FILE__,__LINE__, info->device_name, ch );
2382
2383 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2384 return;
2385
2386 info->x_char = ch;
2387 if (ch) {
2388 /* Make sure transmit interrupts are on */
2389 spin_lock_irqsave(&info->irq_spinlock,flags);
2390 if (!info->tx_enabled)
2391 usc_start_transmitter(info);
2392 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2393 }
2394} /* end of mgsl_send_xchar() */
2395
2396/* mgsl_throttle()
2397 *
2398 * Signal remote device to throttle send data (our receive data)
2399 *
2400 * Arguments: tty pointer to tty info structure
2401 * Return Value: None
2402 */
2403static void mgsl_throttle(struct tty_struct * tty)
2404{
2405 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2406 unsigned long flags;
2407
2408 if (debug_level >= DEBUG_LEVEL_INFO)
2409 printk("%s(%d):mgsl_throttle(%s) entry\n",
2410 __FILE__,__LINE__, info->device_name );
2411
2412 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2413 return;
2414
2415 if (I_IXOFF(tty))
2416 mgsl_send_xchar(tty, STOP_CHAR(tty));
2417
2418 if (tty->termios->c_cflag & CRTSCTS) {
2419 spin_lock_irqsave(&info->irq_spinlock,flags);
2420 info->serial_signals &= ~SerialSignal_RTS;
2421 usc_set_serial_signals(info);
2422 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2423 }
2424} /* end of mgsl_throttle() */
2425
2426/* mgsl_unthrottle()
2427 *
2428 * Signal remote device to stop throttling send data (our receive data)
2429 *
2430 * Arguments: tty pointer to tty info structure
2431 * Return Value: None
2432 */
2433static void mgsl_unthrottle(struct tty_struct * tty)
2434{
2435 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2436 unsigned long flags;
2437
2438 if (debug_level >= DEBUG_LEVEL_INFO)
2439 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2440 __FILE__,__LINE__, info->device_name );
2441
2442 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2443 return;
2444
2445 if (I_IXOFF(tty)) {
2446 if (info->x_char)
2447 info->x_char = 0;
2448 else
2449 mgsl_send_xchar(tty, START_CHAR(tty));
2450 }
2451
2452 if (tty->termios->c_cflag & CRTSCTS) {
2453 spin_lock_irqsave(&info->irq_spinlock,flags);
2454 info->serial_signals |= SerialSignal_RTS;
2455 usc_set_serial_signals(info);
2456 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2457 }
2458
2459} /* end of mgsl_unthrottle() */
2460
2461/* mgsl_get_stats()
2462 *
2463 * 	get the current serial statistics (event and error counters)
2464 *
2465 * Arguments: info pointer to device instance data
2466 * user_icount pointer to buffer to hold returned stats
2467 *
2468 * Return Value: 0 if success, otherwise error code
2469 */
2470static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2471{
2472 int err;
2473
2474 if (debug_level >= DEBUG_LEVEL_INFO)
2475		printk("%s(%d):mgsl_get_stats(%s)\n",
2476 __FILE__,__LINE__, info->device_name);
2477
2478	if (!user_icount) {
2479 memset(&info->icount, 0, sizeof(info->icount));
2480 } else {
2481 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2482 if (err)
2483 return -EFAULT;
2484	}
2485
2486 return 0;
2487
2488} /* end of mgsl_get_stats() */
2489
2490/* mgsl_get_params()
2491 *
2492 * get the current serial parameters information
2493 *
2494 * Arguments: info pointer to device instance data
2495 * user_params pointer to buffer to hold returned params
2496 *
2497 * Return Value: 0 if success, otherwise error code
2498 */
2499static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2500{
2501 int err;
2502 if (debug_level >= DEBUG_LEVEL_INFO)
2503 printk("%s(%d):mgsl_get_params(%s)\n",
2504 __FILE__,__LINE__, info->device_name);
2505
2506 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2507 if (err) {
2508 if ( debug_level >= DEBUG_LEVEL_INFO )
2509 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2510 __FILE__,__LINE__,info->device_name);
2511 return -EFAULT;
2512 }
2513
2514 return 0;
2515
2516} /* end of mgsl_get_params() */
2517
2518/* mgsl_set_params()
2519 *
2520 * set the serial parameters
2521 *
2522 * Arguments:
2523 *
2524 * info pointer to device instance data
2525 * new_params user buffer containing new serial params
2526 *
2527 * Return Value: 0 if success, otherwise error code
2528 */
2529static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2530{
2531 unsigned long flags;
2532 MGSL_PARAMS tmp_params;
2533 int err;
2534
2535 if (debug_level >= DEBUG_LEVEL_INFO)
2536 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2537 info->device_name );
2538 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2539 if (err) {
2540 if ( debug_level >= DEBUG_LEVEL_INFO )
2541 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2542 __FILE__,__LINE__,info->device_name);
2543 return -EFAULT;
2544 }
2545
2546 spin_lock_irqsave(&info->irq_spinlock,flags);
2547 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2548 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2549
2550 mgsl_change_params(info);
2551
2552 return 0;
2553
2554} /* end of mgsl_set_params() */
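
/*
 * Illustrative user-space sketch (not part of the driver) of the
 * get/modify/set sequence served by mgsl_get_params() and
 * mgsl_set_params() above.  It assumes MGSL_PARAMS and the MGSL_IOC*
 * codes are visible to applications through <linux/synclink.h> and that
 * fd is an open descriptor for the port.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int set_hdlc_mode(int fd)
 *	{
 *		MGSL_PARAMS params;
 *		if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *			return -1;
 *		params.mode = MGSL_MODE_HDLC;	// frame oriented operation
 *		params.data_rate = 64000;	// example rate in bits/sec
 *		return ioctl(fd, MGSL_IOCSPARAMS, &params);
 *	}
 */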
2555
2556/* mgsl_get_txidle()
2557 *
2558 * get the current transmit idle mode
2559 *
2560 * Arguments: info pointer to device instance data
2561 * idle_mode pointer to buffer to hold returned idle mode
2562 *
2563 * Return Value: 0 if success, otherwise error code
2564 */
2565static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2566{
2567 int err;
2568
2569 if (debug_level >= DEBUG_LEVEL_INFO)
2570 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2571 __FILE__,__LINE__, info->device_name, info->idle_mode);
2572
2573 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2574 if (err) {
2575 if ( debug_level >= DEBUG_LEVEL_INFO )
2576 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2577 __FILE__,__LINE__,info->device_name);
2578 return -EFAULT;
2579 }
2580
2581 return 0;
2582
2583} /* end of mgsl_get_txidle() */
2584
2585/* mgsl_set_txidle() service ioctl to set transmit idle mode
2586 *
2587 * Arguments: info pointer to device instance data
2588 * idle_mode new idle mode
2589 *
2590 * Return Value: 0 if success, otherwise error code
2591 */
2592static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2593{
2594 unsigned long flags;
2595
2596 if (debug_level >= DEBUG_LEVEL_INFO)
2597 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2598 info->device_name, idle_mode );
2599
2600 spin_lock_irqsave(&info->irq_spinlock,flags);
2601 info->idle_mode = idle_mode;
2602 usc_set_txidle( info );
2603 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2604 return 0;
2605
2606} /* end of mgsl_set_txidle() */
2607
2608/* mgsl_txenable()
2609 *
2610 * enable or disable the transmitter
2611 *
2612 * Arguments:
2613 *
2614 * info pointer to device instance data
2615 * enable 1 = enable, 0 = disable
2616 *
2617 * Return Value: 0 if success, otherwise error code
2618 */
2619static int mgsl_txenable(struct mgsl_struct * info, int enable)
2620{
2621 unsigned long flags;
2622
2623 if (debug_level >= DEBUG_LEVEL_INFO)
2624 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2625 info->device_name, enable);
2626
2627 spin_lock_irqsave(&info->irq_spinlock,flags);
2628 if ( enable ) {
2629 if ( !info->tx_enabled ) {
2630
2631 usc_start_transmitter(info);
2632 /*--------------------------------------------------
2633 * if HDLC/SDLC Loop mode, attempt to insert the
2634 * station in the 'loop' by setting CMR:13. Upon
2635 * receipt of the next GoAhead (RxAbort) sequence,
2636 * the OnLoop indicator (CCSR:7) should go active
2637 * to indicate that we are on the loop
2638 *--------------------------------------------------*/
2639 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2640 usc_loopmode_insert_request( info );
2641 }
2642 } else {
2643 if ( info->tx_enabled )
2644 usc_stop_transmitter(info);
2645 }
2646 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2647 return 0;
2648
2649} /* end of mgsl_txenable() */
2650
2651/* mgsl_txabort()	abort transmission of an HDLC frame in progress
2652 *
2653 * Arguments: info pointer to device instance data
2654 * Return Value: 0 if success, otherwise error code
2655 */
2656static int mgsl_txabort(struct mgsl_struct * info)
2657{
2658 unsigned long flags;
2659
2660 if (debug_level >= DEBUG_LEVEL_INFO)
2661 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2662 info->device_name);
2663
2664 spin_lock_irqsave(&info->irq_spinlock,flags);
2665 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2666 {
2667 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2668 usc_loopmode_cancel_transmit( info );
2669 else
2670 usc_TCmd(info,TCmd_SendAbort);
2671 }
2672 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2673 return 0;
2674
2675} /* end of mgsl_txabort() */
2676
2677/* mgsl_rxenable() enable or disable the receiver
2678 *
2679 * Arguments: info pointer to device instance data
2680 * enable 1 = enable, 0 = disable
2681 * Return Value: 0 if success, otherwise error code
2682 */
2683static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2684{
2685 unsigned long flags;
2686
2687 if (debug_level >= DEBUG_LEVEL_INFO)
2688 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2689 info->device_name, enable);
2690
2691 spin_lock_irqsave(&info->irq_spinlock,flags);
2692 if ( enable ) {
2693 if ( !info->rx_enabled )
2694 usc_start_receiver(info);
2695 } else {
2696 if ( info->rx_enabled )
2697 usc_stop_receiver(info);
2698 }
2699 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2700 return 0;
2701
2702} /* end of mgsl_rxenable() */
2703
2704/* mgsl_wait_event() wait for specified event to occur
2705 *
2706 * Arguments: info pointer to device instance data
2707 * mask pointer to bitmask of events to wait for
2708 * Return Value: 0 if successful and bit mask updated with
2709 * of events triggerred,
2710 *				events triggered,
2711 */
2712static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2713{
2714 unsigned long flags;
2715 int s;
2716 int rc=0;
2717 struct mgsl_icount cprev, cnow;
2718 int events;
2719 int mask;
2720 struct _input_signal_events oldsigs, newsigs;
2721 DECLARE_WAITQUEUE(wait, current);
2722
2723 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2724 if (rc) {
2725 return -EFAULT;
2726 }
2727
2728 if (debug_level >= DEBUG_LEVEL_INFO)
2729 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2730 info->device_name, mask);
2731
2732 spin_lock_irqsave(&info->irq_spinlock,flags);
2733
2734 /* return immediately if state matches requested events */
2735 usc_get_serial_signals(info);
2736 s = info->serial_signals;
2737 events = mask &
2738 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2739 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2740 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2741 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2742 if (events) {
2743 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2744 goto exit;
2745 }
2746
2747 /* save current irq counts */
2748 cprev = info->icount;
2749 oldsigs = info->input_signal_events;
2750
2751 /* enable hunt and idle irqs if needed */
2752 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2753 u16 oldreg = usc_InReg(info,RICR);
2754 u16 newreg = oldreg +
2755 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2756 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2757 if (oldreg != newreg)
2758 usc_OutReg(info, RICR, newreg);
2759 }
2760
2761 set_current_state(TASK_INTERRUPTIBLE);
2762 add_wait_queue(&info->event_wait_q, &wait);
2763
2764 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2765
2766
2767 for(;;) {
2768 schedule();
2769 if (signal_pending(current)) {
2770 rc = -ERESTARTSYS;
2771 break;
2772 }
2773
2774 /* get current irq counts */
2775 spin_lock_irqsave(&info->irq_spinlock,flags);
2776 cnow = info->icount;
2777 newsigs = info->input_signal_events;
2778 set_current_state(TASK_INTERRUPTIBLE);
2779 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2780
2781 /* if no change, wait aborted for some reason */
2782 if (newsigs.dsr_up == oldsigs.dsr_up &&
2783 newsigs.dsr_down == oldsigs.dsr_down &&
2784 newsigs.dcd_up == oldsigs.dcd_up &&
2785 newsigs.dcd_down == oldsigs.dcd_down &&
2786 newsigs.cts_up == oldsigs.cts_up &&
2787 newsigs.cts_down == oldsigs.cts_down &&
2788 newsigs.ri_up == oldsigs.ri_up &&
2789 newsigs.ri_down == oldsigs.ri_down &&
2790 cnow.exithunt == cprev.exithunt &&
2791 cnow.rxidle == cprev.rxidle) {
2792 rc = -EIO;
2793 break;
2794 }
2795
2796 events = mask &
2797 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2798 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2799 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2800 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2801 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2802 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2803 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2804 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2805 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2806 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2807 if (events)
2808 break;
2809
2810 cprev = cnow;
2811 oldsigs = newsigs;
2812 }
2813
2814 remove_wait_queue(&info->event_wait_q, &wait);
2815 set_current_state(TASK_RUNNING);
2816
2817 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2818 spin_lock_irqsave(&info->irq_spinlock,flags);
2819 if (!waitqueue_active(&info->event_wait_q)) {
2820			/* disable exit hunt mode/idle rcvd IRQs */
2821 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2822 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2823 }
2824 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2825 }
2826exit:
2827 if ( rc == 0 )
2828 PUT_USER(rc, events, mask_ptr);
2829
2830 return rc;
2831
2832} /* end of mgsl_wait_event() */
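
/*
 * Illustrative user-space sketch (not part of the driver): the
 * MGSL_IOCWAITEVENT ioctl handled above takes a pointer to an int event
 * mask and, on success, rewrites it with the event(s) that occurred.
 * The MgslEvent_* constants are the same ones used in the code above and
 * are assumed to reach applications through <linux/synclink.h>.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int wait_for_dcd_change(int fd)
 *	{
 *		int events = MgslEvent_DcdActive + MgslEvent_DcdInactive;
 *		if (ioctl(fd, MGSL_IOCWAITEVENT, &events) < 0)
 *			return -1;
 *		return events;	// bitmask of events that occurred
 *	}
 */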
2833
2834static int modem_input_wait(struct mgsl_struct *info,int arg)
2835{
2836 unsigned long flags;
2837 int rc;
2838 struct mgsl_icount cprev, cnow;
2839 DECLARE_WAITQUEUE(wait, current);
2840
2841 /* save current irq counts */
2842 spin_lock_irqsave(&info->irq_spinlock,flags);
2843 cprev = info->icount;
2844 add_wait_queue(&info->status_event_wait_q, &wait);
2845 set_current_state(TASK_INTERRUPTIBLE);
2846 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2847
2848 for(;;) {
2849 schedule();
2850 if (signal_pending(current)) {
2851 rc = -ERESTARTSYS;
2852 break;
2853 }
2854
2855 /* get new irq counts */
2856 spin_lock_irqsave(&info->irq_spinlock,flags);
2857 cnow = info->icount;
2858 set_current_state(TASK_INTERRUPTIBLE);
2859 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2860
2861 /* if no change, wait aborted for some reason */
2862 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2863 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2864 rc = -EIO;
2865 break;
2866 }
2867
2868 /* check for change in caller specified modem input */
2869 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2870 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2871 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2872 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2873 rc = 0;
2874 break;
2875 }
2876
2877 cprev = cnow;
2878 }
2879 remove_wait_queue(&info->status_event_wait_q, &wait);
2880 set_current_state(TASK_RUNNING);
2881 return rc;
2882}
2883
2884/* return the state of the serial control and status signals
2885 */
2886static int tiocmget(struct tty_struct *tty, struct file *file)
2887{
2888 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2889 unsigned int result;
2890 unsigned long flags;
2891
2892 spin_lock_irqsave(&info->irq_spinlock,flags);
2893 usc_get_serial_signals(info);
2894 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2895
2896 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2897 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2898 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2899 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2900 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2901 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2902
2903 if (debug_level >= DEBUG_LEVEL_INFO)
2904 printk("%s(%d):%s tiocmget() value=%08X\n",
2905 __FILE__,__LINE__, info->device_name, result );
2906 return result;
2907}
2908
2909/* set modem control signals (DTR/RTS)
2910 */
2911static int tiocmset(struct tty_struct *tty, struct file *file,
2912 unsigned int set, unsigned int clear)
2913{
2914 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2915 unsigned long flags;
2916
2917 if (debug_level >= DEBUG_LEVEL_INFO)
2918 printk("%s(%d):%s tiocmset(%x,%x)\n",
2919 __FILE__,__LINE__,info->device_name, set, clear);
2920
2921 if (set & TIOCM_RTS)
2922 info->serial_signals |= SerialSignal_RTS;
2923 if (set & TIOCM_DTR)
2924 info->serial_signals |= SerialSignal_DTR;
2925 if (clear & TIOCM_RTS)
2926 info->serial_signals &= ~SerialSignal_RTS;
2927 if (clear & TIOCM_DTR)
2928 info->serial_signals &= ~SerialSignal_DTR;
2929
2930 spin_lock_irqsave(&info->irq_spinlock,flags);
2931 usc_set_serial_signals(info);
2932 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2933
2934 return 0;
2935}
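
/*
 * Illustrative user-space sketch (not part of the driver): the modem
 * control lines handled by tiocmget()/tiocmset() above are reached with
 * the standard TIOCMGET/TIOCMSET ioctls, and TIOCMIWAIT (serviced by
 * modem_input_wait()) blocks until a requested input changes.
 *
 *	#include <sys/ioctl.h>
 *	#include <termios.h>
 *
 *	int wait_for_carrier(int fd)
 *	{
 *		int bits;
 *		if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) < 0)	// wait for DCD change
 *			return -1;
 *		if (ioctl(fd, TIOCMGET, &bits) < 0)
 *			return -1;
 *		return (bits & TIOCM_CD) != 0;	// 1 if carrier now present
 *	}
 */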
2936
2937/* mgsl_break() Set or clear transmit break condition
2938 *
2939 * Arguments: tty pointer to tty instance data
2940 * break_state -1=set break condition, 0=clear
2941 * Return Value: None
2942 */
2943static void mgsl_break(struct tty_struct *tty, int break_state)
2944{
2945 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2946 unsigned long flags;
2947
2948 if (debug_level >= DEBUG_LEVEL_INFO)
2949 printk("%s(%d):mgsl_break(%s,%d)\n",
2950 __FILE__,__LINE__, info->device_name, break_state);
2951
2952 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2953 return;
2954
2955 spin_lock_irqsave(&info->irq_spinlock,flags);
2956 if (break_state == -1)
2957 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2958 else
2959 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2960 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2961
2962} /* end of mgsl_break() */
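
/*
 * Illustrative user-space note (not part of the driver): mgsl_break() is
 * invoked by the tty core on behalf of the standard break requests, for
 * example via tcsendbreak() from <termios.h>:
 *
 *	tcsendbreak(fd, 0);	// assert break for the default duration
 */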
2963
2964/* mgsl_ioctl() Service an IOCTL request
2965 *
2966 * Arguments:
2967 *
2968 * tty pointer to tty instance data
2969 * file pointer to associated file object for device
2970 * cmd IOCTL command code
2971 * arg command argument/context
2972 *
2973 * Return Value: 0 if success, otherwise error code
2974 */
2975static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2976 unsigned int cmd, unsigned long arg)
2977{
2978 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2979
2980 if (debug_level >= DEBUG_LEVEL_INFO)
2981 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2982 info->device_name, cmd );
2983
2984 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2985 return -ENODEV;
2986
2987 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2988 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2989 if (tty->flags & (1 << TTY_IO_ERROR))
2990 return -EIO;
2991 }
2992
2993 return mgsl_ioctl_common(info, cmd, arg);
2994}
2995
2996static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2997{
2998 int error;
2999 struct mgsl_icount cnow; /* kernel counter temps */
3000 void __user *argp = (void __user *)arg;
3001 struct serial_icounter_struct __user *p_cuser; /* user space */
3002 unsigned long flags;
3003
3004 switch (cmd) {
3005 case MGSL_IOCGPARAMS:
3006 return mgsl_get_params(info, argp);
3007 case MGSL_IOCSPARAMS:
3008 return mgsl_set_params(info, argp);
3009 case MGSL_IOCGTXIDLE:
3010 return mgsl_get_txidle(info, argp);
3011 case MGSL_IOCSTXIDLE:
3012 return mgsl_set_txidle(info,(int)arg);
3013 case MGSL_IOCTXENABLE:
3014 return mgsl_txenable(info,(int)arg);
3015 case MGSL_IOCRXENABLE:
3016 return mgsl_rxenable(info,(int)arg);
3017 case MGSL_IOCTXABORT:
3018 return mgsl_txabort(info);
3019 case MGSL_IOCGSTATS:
3020 return mgsl_get_stats(info, argp);
3021 case MGSL_IOCWAITEVENT:
3022 return mgsl_wait_event(info, argp);
3023 case MGSL_IOCLOOPTXDONE:
3024 return mgsl_loopmode_send_done(info);
3025 /* Wait for modem input (DCD,RI,DSR,CTS) change
3026 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3027 */
3028 case TIOCMIWAIT:
3029 return modem_input_wait(info,(int)arg);
3030
3031 /*
3032 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3033 * Return: write counters to the user passed counter struct
3034 * NB: both 1->0 and 0->1 transitions are counted except for
3035 * RI where only 0->1 is counted.
3036 */
3037 case TIOCGICOUNT:
3038 spin_lock_irqsave(&info->irq_spinlock,flags);
3039 cnow = info->icount;
3040 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3041 p_cuser = argp;
3042 PUT_USER(error,cnow.cts, &p_cuser->cts);
3043 if (error) return error;
3044 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3045 if (error) return error;
3046 PUT_USER(error,cnow.rng, &p_cuser->rng);
3047 if (error) return error;
3048 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3049 if (error) return error;
3050 PUT_USER(error,cnow.rx, &p_cuser->rx);
3051 if (error) return error;
3052 PUT_USER(error,cnow.tx, &p_cuser->tx);
3053 if (error) return error;
3054 PUT_USER(error,cnow.frame, &p_cuser->frame);
3055 if (error) return error;
3056 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3057 if (error) return error;
3058 PUT_USER(error,cnow.parity, &p_cuser->parity);
3059 if (error) return error;
3060 PUT_USER(error,cnow.brk, &p_cuser->brk);
3061 if (error) return error;
3062 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3063 if (error) return error;
3064 return 0;
3065 default:
3066 return -ENOIOCTLCMD;
3067 }
3068 return 0;
3069}
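
/*
 * Illustrative user-space sketch (not part of the driver): the
 * TIOCGICOUNT case above fills a struct serial_icounter_struct, assumed
 * to be available to applications through <linux/serial.h>.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>
 *
 *	void show_counts(int fd)
 *	{
 *		struct serial_icounter_struct icount;
 *		if (ioctl(fd, TIOCGICOUNT, &icount) == 0)
 *			printf("rx=%d tx=%d buf_overrun=%d\n",
 *			       icount.rx, icount.tx, icount.buf_overrun);
 *	}
 */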
3070
3071/* mgsl_set_termios()
3072 *
3073 * Set new termios settings
3074 *
3075 * Arguments:
3076 *
3077 * tty pointer to tty structure
3078 * 	old_termios	pointer to previous termios settings
3079 *
3080 * Return Value: None
3081 */
3082static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3083{
3084 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3085 unsigned long flags;
3086
3087 if (debug_level >= DEBUG_LEVEL_INFO)
3088 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3089 tty->driver->name );
3090
3091 /* just return if nothing has changed */
3092 if ((tty->termios->c_cflag == old_termios->c_cflag)
3093 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3094 == RELEVANT_IFLAG(old_termios->c_iflag)))
3095 return;
3096
3097 mgsl_change_params(info);
3098
3099 /* Handle transition to B0 status */
3100 if (old_termios->c_cflag & CBAUD &&
3101 !(tty->termios->c_cflag & CBAUD)) {
3102 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3103 spin_lock_irqsave(&info->irq_spinlock,flags);
3104 usc_set_serial_signals(info);
3105 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3106 }
3107
3108 /* Handle transition away from B0 status */
3109 if (!(old_termios->c_cflag & CBAUD) &&
3110 tty->termios->c_cflag & CBAUD) {
3111 info->serial_signals |= SerialSignal_DTR;
3112 if (!(tty->termios->c_cflag & CRTSCTS) ||
3113 !test_bit(TTY_THROTTLED, &tty->flags)) {
3114 info->serial_signals |= SerialSignal_RTS;
3115 }
3116 spin_lock_irqsave(&info->irq_spinlock,flags);
3117 usc_set_serial_signals(info);
3118 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3119 }
3120
3121 /* Handle turning off CRTSCTS */
3122 if (old_termios->c_cflag & CRTSCTS &&
3123 !(tty->termios->c_cflag & CRTSCTS)) {
3124 tty->hw_stopped = 0;
3125 mgsl_start(tty);
3126 }
3127
3128} /* end of mgsl_set_termios() */
3129
3130/* mgsl_close()
3131 *
3132 * Called when port is closed. Wait for remaining data to be
3133 * sent. Disable port and free resources.
3134 *
3135 * Arguments:
3136 *
3137 * tty pointer to open tty structure
3138 * filp pointer to open file object
3139 *
3140 * Return Value: None
3141 */
3142static void mgsl_close(struct tty_struct *tty, struct file * filp)
3143{
3144 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3145
3146 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3147 return;
3148
3149 if (debug_level >= DEBUG_LEVEL_INFO)
3150 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3151 __FILE__,__LINE__, info->device_name, info->count);
3152
3153 if (!info->count)
3154 return;
3155
3156 if (tty_hung_up_p(filp))
3157 goto cleanup;
3158
3159 if ((tty->count == 1) && (info->count != 1)) {
3160 /*
3161 * tty->count is 1 and the tty structure will be freed.
3162 * info->count should be one in this case.
3163 * if it's not, correct it so that the port is shutdown.
3164	 * If it's not, correct it so that the port is shut down.
3165 printk("mgsl_close: bad refcount; tty->count is 1, "
3166 "info->count is %d\n", info->count);
3167 info->count = 1;
3168 }
3169
3170 info->count--;
3171
3172 /* if at least one open remaining, leave hardware active */
3173 if (info->count)
3174 goto cleanup;
3175
3176 info->flags |= ASYNC_CLOSING;
3177
3178 /* set tty->closing to notify line discipline to
3179 * only process XON/XOFF characters. Only the N_TTY
3180 * discipline appears to use this (ppp does not).
3181 */
3182 tty->closing = 1;
3183
3184 /* wait for transmit data to clear all layers */
3185
3186 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3187 if (debug_level >= DEBUG_LEVEL_INFO)
3188 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3189 __FILE__,__LINE__, info->device_name );
3190 tty_wait_until_sent(tty, info->closing_wait);
3191 }
3192
3193 if (info->flags & ASYNC_INITIALIZED)
3194 mgsl_wait_until_sent(tty, info->timeout);
3195
3196 if (tty->driver->flush_buffer)
3197 tty->driver->flush_buffer(tty);
3198
3199 tty_ldisc_flush(tty);
3200
3201 shutdown(info);
3202
3203 tty->closing = 0;
3204 info->tty = NULL;
3205
3206 if (info->blocked_open) {
3207 if (info->close_delay) {
3208 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3209 }
3210 wake_up_interruptible(&info->open_wait);
3211 }
3212
3213 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3214
3215 wake_up_interruptible(&info->close_wait);
3216
3217cleanup:
3218 if (debug_level >= DEBUG_LEVEL_INFO)
3219 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3220 tty->driver->name, info->count);
3221
3222} /* end of mgsl_close() */
3223
3224/* mgsl_wait_until_sent()
3225 *
3226 * Wait until the transmitter is empty.
3227 *
3228 * Arguments:
3229 *
3230 * tty pointer to tty info structure
3231 * timeout time to wait for send completion
3232 *
3233 * Return Value: None
3234 */
3235static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3236{
3237 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3238 unsigned long orig_jiffies, char_time;
3239
3240 if (!info )
3241 return;
3242
3243 if (debug_level >= DEBUG_LEVEL_INFO)
3244 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3245 __FILE__,__LINE__, info->device_name );
3246
3247 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3248 return;
3249
3250 if (!(info->flags & ASYNC_INITIALIZED))
3251 goto exit;
3252
3253 orig_jiffies = jiffies;
3254
3255 /* Set check interval to 1/5 of estimated time to
3256 * send a character, and make it at least 1. The check
3257 * interval should also be less than the timeout.
3258 * Note: use tight timings here to satisfy the NIST-PCTS.
3259 */
3260
3261 if ( info->params.data_rate ) {
3262 char_time = info->timeout/(32 * 5);
3263 if (!char_time)
3264 char_time++;
3265 } else
3266 char_time = 1;
3267
3268 if (timeout)
3269 char_time = min_t(unsigned long, char_time, timeout);
3270
3271 if ( info->params.mode == MGSL_MODE_HDLC ||
3272 info->params.mode == MGSL_MODE_RAW ) {
3273 while (info->tx_active) {
3274 msleep_interruptible(jiffies_to_msecs(char_time));
3275 if (signal_pending(current))
3276 break;
3277 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3278 break;
3279 }
3280 } else {
3281 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3282 info->tx_enabled) {
3283 msleep_interruptible(jiffies_to_msecs(char_time));
3284 if (signal_pending(current))
3285 break;
3286 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3287 break;
3288 }
3289 }
3290
3291exit:
3292 if (debug_level >= DEBUG_LEVEL_INFO)
3293 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3294 __FILE__,__LINE__, info->device_name );
3295
3296} /* end of mgsl_wait_until_sent() */
3297
3298/* mgsl_hangup()
3299 *
3300 * Called by tty_hangup() when a hangup is signaled.
3301 * This is equivalent to closing all open files for the port.
3302 *
3303 * Arguments: tty pointer to associated tty object
3304 * Return Value: None
3305 */
3306static void mgsl_hangup(struct tty_struct *tty)
3307{
3308 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3309
3310 if (debug_level >= DEBUG_LEVEL_INFO)
3311 printk("%s(%d):mgsl_hangup(%s)\n",
3312 __FILE__,__LINE__, info->device_name );
3313
3314 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3315 return;
3316
3317 mgsl_flush_buffer(tty);
3318 shutdown(info);
3319
3320 info->count = 0;
3321 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3322 info->tty = NULL;
3323
3324 wake_up_interruptible(&info->open_wait);
3325
3326} /* end of mgsl_hangup() */
3327
3328/* block_til_ready()
3329 *
3330 * Block the current process until the specified port
3331 * is ready to be opened.
3332 *
3333 * Arguments:
3334 *
3335 * tty pointer to tty info structure
3336 * filp pointer to open file object
3337 * info pointer to device instance data
3338 *
3339 * Return Value: 0 if success, otherwise error code
3340 */
3341static int block_til_ready(struct tty_struct *tty, struct file * filp,
3342 struct mgsl_struct *info)
3343{
3344 DECLARE_WAITQUEUE(wait, current);
3345 int retval;
3346 int do_clocal = 0, extra_count = 0;
3347 unsigned long flags;
3348
3349 if (debug_level >= DEBUG_LEVEL_INFO)
3350 printk("%s(%d):block_til_ready on %s\n",
3351 __FILE__,__LINE__, tty->driver->name );
3352
3353 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3354 /* nonblock mode is set or port is not enabled */
3355 info->flags |= ASYNC_NORMAL_ACTIVE;
3356 return 0;
3357 }
3358
3359 if (tty->termios->c_cflag & CLOCAL)
3360 do_clocal = 1;
3361
3362 /* Wait for carrier detect and the line to become
3363 * free (i.e., not in use by the callout). While we are in
3364 * this loop, info->count is dropped by one, so that
3365 * mgsl_close() knows when to free things. We restore it upon
3366 * exit, either normal or abnormal.
3367 */
3368
3369 retval = 0;
3370 add_wait_queue(&info->open_wait, &wait);
3371
3372 if (debug_level >= DEBUG_LEVEL_INFO)
3373 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3374 __FILE__,__LINE__, tty->driver->name, info->count );
3375
3376 spin_lock_irqsave(&info->irq_spinlock, flags);
3377 if (!tty_hung_up_p(filp)) {
3378 extra_count = 1;
3379 info->count--;
3380 }
3381 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3382 info->blocked_open++;
3383
3384 while (1) {
3385 if (tty->termios->c_cflag & CBAUD) {
3386 spin_lock_irqsave(&info->irq_spinlock,flags);
3387 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3388 usc_set_serial_signals(info);
3389 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3390 }
3391
3392 set_current_state(TASK_INTERRUPTIBLE);
3393
3394 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3395 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3396 -EAGAIN : -ERESTARTSYS;
3397 break;
3398 }
3399
3400 spin_lock_irqsave(&info->irq_spinlock,flags);
3401 usc_get_serial_signals(info);
3402 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3403
3404 if (!(info->flags & ASYNC_CLOSING) &&
3405 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3406 break;
3407 }
3408
3409 if (signal_pending(current)) {
3410 retval = -ERESTARTSYS;
3411 break;
3412 }
3413
3414 if (debug_level >= DEBUG_LEVEL_INFO)
3415 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3416 __FILE__,__LINE__, tty->driver->name, info->count );
3417
3418 schedule();
3419 }
3420
3421 set_current_state(TASK_RUNNING);
3422 remove_wait_queue(&info->open_wait, &wait);
3423
3424 if (extra_count)
3425 info->count++;
3426 info->blocked_open--;
3427
3428 if (debug_level >= DEBUG_LEVEL_INFO)
3429 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3430 __FILE__,__LINE__, tty->driver->name, info->count );
3431
3432 if (!retval)
3433 info->flags |= ASYNC_NORMAL_ACTIVE;
3434
3435 return retval;
3436
3437} /* end of block_til_ready() */
3438
3439/* mgsl_open()
3440 *
3441 * Called when a port is opened. Init and enable port.
3442 * Perform serial-specific initialization for the tty structure.
3443 *
3444 * Arguments: tty pointer to tty info structure
3445 * filp associated file pointer
3446 *
3447 * Return Value: 0 if success, otherwise error code
3448 */
3449static int mgsl_open(struct tty_struct *tty, struct file * filp)
3450{
3451 struct mgsl_struct *info;
3452 int retval, line;
3453 unsigned long page;
3454 unsigned long flags;
3455
3456 /* verify range of specified line number */
3457 line = tty->index;
3458 if ((line < 0) || (line >= mgsl_device_count)) {
3459 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3460 __FILE__,__LINE__,line);
3461 return -ENODEV;
3462 }
3463
3464 /* find the info structure for the specified line */
3465 info = mgsl_device_list;
3466 while(info && info->line != line)
3467 info = info->next_device;
3468 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3469 return -ENODEV;
3470
3471 tty->driver_data = info;
3472 info->tty = tty;
3473
3474 if (debug_level >= DEBUG_LEVEL_INFO)
3475 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3476 __FILE__,__LINE__,tty->driver->name, info->count);
3477
3478 /* If port is closing, signal caller to try again */
3479 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3480 if (info->flags & ASYNC_CLOSING)
3481 interruptible_sleep_on(&info->close_wait);
3482 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3483 -EAGAIN : -ERESTARTSYS);
3484 goto cleanup;
3485 }
3486
3487 if (!tmp_buf) {
3488 page = get_zeroed_page(GFP_KERNEL);
3489 if (!page) {
3490 retval = -ENOMEM;
3491 goto cleanup;
3492 }
3493 if (tmp_buf)
3494 free_page(page);
3495 else
3496 tmp_buf = (unsigned char *) page;
3497 }
3498
3499 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3500
3501 spin_lock_irqsave(&info->netlock, flags);
3502 if (info->netcount) {
3503 retval = -EBUSY;
3504 spin_unlock_irqrestore(&info->netlock, flags);
3505 goto cleanup;
3506 }
3507 info->count++;
3508 spin_unlock_irqrestore(&info->netlock, flags);
3509
3510 if (info->count == 1) {
3511 /* 1st open on this device, init hardware */
3512 retval = startup(info);
3513 if (retval < 0)
3514 goto cleanup;
3515 }
3516
3517 retval = block_til_ready(tty, filp, info);
3518 if (retval) {
3519 if (debug_level >= DEBUG_LEVEL_INFO)
3520 printk("%s(%d):block_til_ready(%s) returned %d\n",
3521 __FILE__,__LINE__, info->device_name, retval);
3522 goto cleanup;
3523 }
3524
3525 if (debug_level >= DEBUG_LEVEL_INFO)
3526 printk("%s(%d):mgsl_open(%s) success\n",
3527 __FILE__,__LINE__, info->device_name);
3528 retval = 0;
3529
3530cleanup:
3531 if (retval) {
3532 if (tty->count == 1)
3533 info->tty = NULL; /* tty layer will release tty struct */
3534 if(info->count)
3535 info->count--;
3536 }
3537
3538 return retval;
3539
3540} /* end of mgsl_open() */
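
/*
 * Illustrative user-space sketch (not part of the driver): opening the tty
 * device node reaches mgsl_open() above. block_til_ready() follows the usual
 * serial.c-derived open semantics, so a non-blocking open is expected to
 * return without waiting for carrier. The node name below is an assumption
 * used only for the example.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY | O_NONBLOCK);
 *	if (fd < 0)
 *		perror("open ttySL0");
 */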
3541
3542/*
3543 * /proc fs routines....
3544 */
3545
3546static inline int line_info(char *buf, struct mgsl_struct *info)
3547{
3548 char stat_buf[30];
3549 int ret;
3550 unsigned long flags;
3551
3552 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3553 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3554 info->device_name, info->io_base, info->irq_level,
3555 info->phys_memory_base, info->phys_lcr_base);
3556 } else {
3557 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3558 info->device_name, info->io_base,
3559 info->irq_level, info->dma_level);
3560 }
3561
3562 /* output current serial signal states */
3563 spin_lock_irqsave(&info->irq_spinlock,flags);
3564 usc_get_serial_signals(info);
3565 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3566
3567 stat_buf[0] = 0;
3568 stat_buf[1] = 0;
3569 if (info->serial_signals & SerialSignal_RTS)
3570 strcat(stat_buf, "|RTS");
3571 if (info->serial_signals & SerialSignal_CTS)
3572 strcat(stat_buf, "|CTS");
3573 if (info->serial_signals & SerialSignal_DTR)
3574 strcat(stat_buf, "|DTR");
3575 if (info->serial_signals & SerialSignal_DSR)
3576 strcat(stat_buf, "|DSR");
3577 if (info->serial_signals & SerialSignal_DCD)
3578 strcat(stat_buf, "|CD");
3579 if (info->serial_signals & SerialSignal_RI)
3580 strcat(stat_buf, "|RI");
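	/* stat_buf now holds "|SIG|SIG..."; it is printed later as stat_buf+1
	 * so the leading '|' separator is dropped from the output. */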
3581
3582 if (info->params.mode == MGSL_MODE_HDLC ||
3583 info->params.mode == MGSL_MODE_RAW ) {
3584 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3585 info->icount.txok, info->icount.rxok);
3586 if (info->icount.txunder)
3587 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3588 if (info->icount.txabort)
3589 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3590 if (info->icount.rxshort)
3591 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3592 if (info->icount.rxlong)
3593 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3594 if (info->icount.rxover)
3595 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3596 if (info->icount.rxcrc)
3597 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3598 } else {
3599 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3600 info->icount.tx, info->icount.rx);
3601 if (info->icount.frame)
3602 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3603 if (info->icount.parity)
3604 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3605 if (info->icount.brk)
3606 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3607 if (info->icount.overrun)
3608 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3609 }
3610
3611 /* Append serial signal status to end */
3612 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3613
3614 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3615 info->tx_active,info->bh_requested,info->bh_running,
3616 info->pending_bh);
3617
3618 spin_lock_irqsave(&info->irq_spinlock,flags);
3619 {
3620 u16 Tcsr = usc_InReg( info, TCSR );
3621 u16 Tdmr = usc_InDmaReg( info, TDMR );
3622 u16 Ticr = usc_InReg( info, TICR );
3623 u16 Rscr = usc_InReg( info, RCSR );
3624 u16 Rdmr = usc_InDmaReg( info, RDMR );
3625 u16 Ricr = usc_InReg( info, RICR );
3626 u16 Icr = usc_InReg( info, ICR );
3627 u16 Dccr = usc_InReg( info, DCCR );
3628 u16 Tmr = usc_InReg( info, TMR );
3629 u16 Tccr = usc_InReg( info, TCCR );
3630 u16 Ccar = inw( info->io_base + CCAR );
3631 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3632 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3633 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3634 }
3635 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3636
3637 return ret;
3638
3639} /* end of line_info() */
3640
3641/* mgsl_read_proc()
3642 *
3643 * Called to print information about devices
3644 *
3645 * Arguments:
3646 * page page of memory to hold returned info
3647 * start
3648 * off
3649 * count
3650 * eof
3651 * data
3652 *
3653 * Return Value:
3654 */
3655static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3656 int *eof, void *data)
3657{
3658 int len = 0, l;
3659 off_t begin = 0;
3660 struct mgsl_struct *info;
3661
3662 len += sprintf(page, "synclink driver:%s\n", driver_version);
3663
3664 info = mgsl_device_list;
3665 while( info ) {
3666 l = line_info(page + len, info);
3667 len += l;
3668 if (len+begin > off+count)
3669 goto done;
3670 if (len+begin < off) {
3671 begin += len;
3672 len = 0;
3673 }
3674 info = info->next_device;
3675 }
3676
3677 *eof = 1;
3678done:
3679 if (off >= len+begin)
3680 return 0;
3681 *start = page + (off-begin);
3682 return ((count < begin+len-off) ? count : begin+len-off);
3683
3684} /* end of mgsl_read_proc() */
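
/*
 * Worked example of the windowing logic above (illustrative numbers): if the
 * first device's line_info() output is 300 bytes and the caller asks for
 * count=100 at off=250, the loop finishes with begin=0 and len=300, *start is
 * set to page + (off-begin) = page + 250, and the return value is
 * begin+len-off = 50, i.e. the smaller of count and the bytes remaining.
 */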
3685
3686/* mgsl_allocate_dma_buffers()
3687 *
3688 * Allocate and format DMA buffers (ISA adapter)
3689 * or format shared memory buffers (PCI adapter).
3690 *
3691 * Arguments: info pointer to device instance data
3692 * Return Value: 0 if success, otherwise error
3693 */
3694static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3695{
3696 unsigned short BuffersPerFrame;
3697
3698 info->last_mem_alloc = 0;
3699
3700 /* Calculate the number of DMA buffers necessary to hold the */
3701 /* largest allowable frame size. Note: If the max frame size is */
3702 /* not an even multiple of the DMA buffer size then we need to */
3703 /* round the buffer count per frame up one. */
3704
3705 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3706 if ( info->max_frame_size % DMABUFFERSIZE )
3707 BuffersPerFrame++;
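	/*
	 * Worked example of the round-up above (illustrative sizes, assuming
	 * DMABUFFERSIZE is one 4K page):
	 *   max_frame_size = 4096 -> 4096/4096 = 1, remainder 0   -> 1 buffer
	 *   max_frame_size = 9000 -> 9000/4096 = 2, remainder 808 -> 3 buffers
	 */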
3708
3709 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3710 /*
3711 * The PCI adapter has 256KBytes of shared memory to use.
3712 * This is 64 PAGE_SIZE buffers.
3713 *
3714 * The first page is used for padding at this time so the
3715 * buffer list does not begin at offset 0 of the PCI
3716 * adapter's shared memory.
3717 *
3718 * The 2nd page is used for the buffer list. A 4K buffer
3719 * list can hold 128 DMA_BUFFER structures at 32 bytes
3720 * each.
3721 *
3722 * This leaves 62 4K pages.
3723 *
3724 * The next N pages are used for transmit frame(s). We
3725 * reserve enough 4K page blocks to hold the required
3726 * number of transmit dma buffers (num_tx_dma_buffers),
3727 * each of MaxFrameSize size.
3728 *
3729 * Of the remaining pages (62-N), determine how many can
3730 * be used to receive full MaxFrameSize inbound frames
3731 */
3732 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3733 info->rx_buffer_count = 62 - info->tx_buffer_count;
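		/*
		 * Worked example of the page accounting above (illustrative,
		 * assuming a 4K DMABUFFERSIZE and the default of one tx dma
		 * buffer): 256KB of shared memory is 64 pages; one page of
		 * padding plus one page of buffer list leaves 62 data pages.
		 * With max_frame_size = 4096, BuffersPerFrame = 1, so
		 * tx_buffer_count = 1 and rx_buffer_count = 61.
		 */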
3734 } else {
3735 /* Calculate the number of PAGE_SIZE buffers needed for */
3736 /* receive and transmit DMA buffers. */
3737
3738
3739 /* Calculate the number of DMA buffers necessary to */
3740 /* hold 7 max size receive frames and one max size transmit frame. */
3741 /* The receive buffer count is bumped by one so we avoid an */
3742 /* End of List condition if all receive buffers are used when */
3743 /* using linked list DMA buffers. */
3744
3745 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3746 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
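		/*
		 * Worked example (illustrative, assuming MAXRXFRAMES is the 7
		 * frames mentioned above and BuffersPerFrame = 1): with one tx
		 * dma buffer, tx_buffer_count = 1 and rx_buffer_count =
		 * (1 * 7) + 6 = 13, well under the 62 buffer cap applied below.
		 */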
3747
3748 /*
3749 * limit total TxBuffers & RxBuffers to 62 4K total
3750 * (ala PCI Allocation)
3751 */
3752
3753 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3754 info->rx_buffer_count = 62 - info->tx_buffer_count;
3755
3756 }
3757
3758 if ( debug_level >= DEBUG_LEVEL_INFO )
3759 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3760 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3761
3762 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3763 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3764 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3765 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3766 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3767 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3768 return -ENOMEM;
3769 }
3770
3771 mgsl_reset_rx_dma_buffers( info );
3772 mgsl_reset_tx_dma_buffers( info );
3773
3774 return 0;
3775
3776} /* end of mgsl_allocate_dma_buffers() */
3777
3778/*
3779 * mgsl_alloc_buffer_list_memory()
3780 *
3781 * Allocate a common DMA buffer for use as the
3782 * receive and transmit buffer lists.
3783 *
3784 * A buffer list is a set of buffer entries where each entry contains
3785 * a pointer to an actual buffer and a pointer to the next buffer entry
3786 * (plus some other info about the buffer).
3787 *
3788 * The buffer entries for a list are built to form a circular list so
3789 * that when the entire list has been traversed you start back at the
3790 * beginning.
3791 *
3792 * This function allocates memory for just the buffer entries.
3793 * The links (pointer to next entry) are filled in with the physical
3794 * address of the next entry so the adapter can navigate the list
3795 * using bus master DMA. The pointers to the actual buffers are filled
3796 * out later when the actual buffers are allocated.
3797 *
3798 * Arguments: info pointer to device instance data
3799 * Return Value: 0 if success, otherwise error
3800 */
3801static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3802{
3803 unsigned int i;
3804
3805 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3806 /* PCI adapter uses shared memory. */
3807 info->buffer_list = info->memory_base + info->last_mem_alloc;
3808 info->buffer_list_phys = info->last_mem_alloc;
3809 info->last_mem_alloc += BUFFERLISTSIZE;
3810 } else {
3811 /* ISA adapter uses system memory. */
3812 /* The buffer lists are allocated as a common buffer that both */
3813 /* the processor and adapter can access. This allows the driver to */
3814 /* inspect portions of the buffer while other portions are being */
3815 /* updated by the adapter using Bus Master DMA. */
3816
3817		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3818		if (info->buffer_list == NULL)
3819			return -ENOMEM;
3820		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3821	}
3822
3823 /* We got the memory for the buffer entry lists. */
3824 /* Initialize the memory block to all zeros. */
3825 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3826
3827 /* Save virtual address pointers to the receive and */
3828 /* transmit buffer lists. (Receive 1st). These pointers will */
3829 /* be used by the processor to access the lists. */
3830 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3831 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3832 info->tx_buffer_list += info->rx_buffer_count;
3833
3834 /*
3835 * Build the links for the buffer entry lists such that
3836 * two circular lists are built. (Transmit and Receive).
3837 *
3838 * Note: the links are physical addresses
3839 * which are read by the adapter to determine the next
3840 * buffer entry to use.
3841 */
3842
3843 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3844 /* calculate and store physical address of this buffer entry */
3845 info->rx_buffer_list[i].phys_entry =
3846 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3847
3848 /* calculate and store physical address of */
3849		/* next entry in circular list of entries */
3850
3851 info->rx_buffer_list[i].link = info->buffer_list_phys;
3852
3853 if ( i < info->rx_buffer_count - 1 )
3854 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3855 }
3856
3857 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3858 /* calculate and store physical address of this buffer entry */
3859 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3860 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3861
3862 /* calculate and store physical address of */
3863		/* next entry in circular list of entries */
3864
3865 info->tx_buffer_list[i].link = info->buffer_list_phys +
3866 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3867
3868 if ( i < info->tx_buffer_count - 1 )
3869 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3870 }
3871
3872 return 0;
3873
3874} /* end of mgsl_alloc_buffer_list_memory() */
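
/*
 * Illustrative sketch (not used by the driver): the physical address of a
 * receive buffer entry and of the entry it links to, as built by
 * mgsl_alloc_buffer_list_memory() above, can be computed directly. These
 * helpers only restate the arithmetic of the loops above; the names are
 * examples and do not exist elsewhere in the driver.
 */
static inline u32 mgsl_rx_entry_phys_example( struct mgsl_struct *info, unsigned int i )
{
	/* receive entries start at the beginning of the buffer list block */
	return info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
}

static inline u32 mgsl_rx_link_phys_example( struct mgsl_struct *info, unsigned int i )
{
	/* the last entry links back to entry 0, closing the circular list */
	unsigned int next = (i + 1 < info->rx_buffer_count) ? i + 1 : 0;
	return info->buffer_list_phys + (next * sizeof(DMABUFFERENTRY));
}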
3875
3876/* Free DMA buffers allocated for use as the
3877 * receive and transmit buffer lists.
3878 * Warning:
3879 *
3880 * The data transfer buffers associated with the buffer list
3881 * MUST be freed before freeing the buffer list itself because
3882 * the buffer list contains the information necessary to free
3883 * the individual buffers!
3884 */
3885static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3886{
3887	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3888		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3889
3890 info->buffer_list = NULL;
3891 info->rx_buffer_list = NULL;
3892 info->tx_buffer_list = NULL;
3893
3894} /* end of mgsl_free_buffer_list_memory() */
3895
3896/*
3897 * mgsl_alloc_frame_memory()
3898 *
3899 * Allocate the frame DMA buffers used by the specified buffer list.
3900 * Each DMA buffer will be one memory page in size. This is necessary
3901 * because memory can fragment enough that it may be impossible
3902 * to allocate contiguous pages.
3903 *
3904 * Arguments:
3905 *
3906 * info pointer to device instance data
3907 * BufferList pointer to list of buffer entries
3908 * Buffercount count of buffer entries in buffer list
3909 *
3910 * Return Value: 0 if success, otherwise -ENOMEM
3911 */
3912static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3913{
3914 int i;
3915	u32 phys_addr;
3916
3917 /* Allocate page sized buffers for the receive buffer list */
3918
3919 for ( i = 0; i < Buffercount; i++ ) {
3920 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3921 /* PCI adapter uses shared memory buffers. */
3922 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3923 phys_addr = info->last_mem_alloc;
3924 info->last_mem_alloc += DMABUFFERSIZE;
3925 } else {
3926 /* ISA adapter uses system memory. */
3927			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3928			if (BufferList[i].virt_addr == NULL)
3929				return -ENOMEM;
3930			phys_addr = (u32)(BufferList[i].dma_addr);
3931		}
3932 BufferList[i].phys_addr = phys_addr;
3933 }
3934
3935 return 0;
3936
3937} /* end of mgsl_alloc_frame_memory() */
3938
3939/*
3940 * mgsl_free_frame_memory()
3941 *
3942 * Free the buffers associated with
3943 * each buffer entry of a buffer list.
3944 *
3945 * Arguments:
3946 *
3947 * info pointer to device instance data
3948 * BufferList pointer to list of buffer entries
3949 * Buffercount count of buffer entries in buffer list
3950 *
3951 * Return Value: None
3952 */
3953static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3954{
3955 int i;
3956
3957 if ( BufferList ) {
3958 for ( i = 0 ; i < Buffercount ; i++ ) {
3959 if ( BufferList[i].virt_addr ) {
3960 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3961					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3962				BufferList[i].virt_addr = NULL;
3963 }
3964 }
3965 }
3966
3967} /* end of mgsl_free_frame_memory() */
3968
3969/* mgsl_free_dma_buffers()
3970 *
3971 * Free DMA buffers
3972 *
3973 * Arguments: info pointer to device instance data
3974 * Return Value: None
3975 */
3976static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3977{
3978 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3979 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3980 mgsl_free_buffer_list_memory( info );
3981
3982} /* end of mgsl_free_dma_buffers() */
3983
3984
3985/*
3986 * mgsl_alloc_intermediate_rxbuffer_memory()
3987 *
3988 * Allocate a buffer large enough to hold max_frame_size. This buffer
3989 * is used to pass an assembled frame to the line discipline.
3990 *
3991 * Arguments:
3992 *
3993 * info pointer to device instance data
3994 *
3995 * Return Value: 0 if success, otherwise -ENOMEM
3996 */
3997static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3998{
3999 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
4000 if ( info->intermediate_rxbuffer == NULL )
4001 return -ENOMEM;
4002
4003 return 0;
4004
4005} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
4006
4007/*
4008 * mgsl_free_intermediate_rxbuffer_memory()
4009 *
4010 *
4011 * Arguments:
4012 *
4013 * info pointer to device instance data
4014 *
4015 * Return Value: None
4016 */
4017static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4018{
4019	kfree(info->intermediate_rxbuffer);
4020	info->intermediate_rxbuffer = NULL;
4021
4022} /* end of mgsl_free_intermediate_rxbuffer_memory() */
4023
4024/*
4025 * mgsl_alloc_intermediate_txbuffer_memory()
4026 *
4027 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
4028 * This buffer is used to load transmit frames into the adapter's dma transfer
4029 * buffers when there is sufficient space.
4030 *
4031 * Arguments:
4032 *
4033 * info pointer to device instance data
4034 *
4035 * Return Value: 0 if success, otherwise -ENOMEM
4036 */
4037static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4038{
4039 int i;
4040
4041 if ( debug_level >= DEBUG_LEVEL_INFO )
4042 printk("%s %s(%d) allocating %d tx holding buffers\n",
4043 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4044
4045 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4046
4047 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4048 info->tx_holding_buffers[i].buffer =
4049 kmalloc(info->max_frame_size, GFP_KERNEL);
4050 if ( info->tx_holding_buffers[i].buffer == NULL )
4051 return -ENOMEM;
4052 }
4053
4054 return 0;
4055
4056} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4057
4058/*
4059 * mgsl_free_intermediate_txbuffer_memory()
4060 *
4061 *
4062 * Arguments:
4063 *
4064 * info pointer to device instance data
4065 *
4066 * Return Value: None
4067 */
4068static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4069{
4070 int i;
4071
4072 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4073		kfree(info->tx_holding_buffers[i].buffer);
4074		info->tx_holding_buffers[i].buffer = NULL;
4075	}
4076
4077 info->get_tx_holding_index = 0;
4078 info->put_tx_holding_index = 0;
4079 info->tx_holding_count = 0;
4080
4081} /* end of mgsl_free_intermediate_txbuffer_memory() */
4082
4083
4084/*
4085 * load_next_tx_holding_buffer()
4086 *
4087 * attempts to load the next buffered tx request into the
4088 * tx dma buffers
4089 *
4090 * Arguments:
4091 *
4092 * info pointer to device instance data
4093 *
4094 * Return Value: 1 if next buffered tx request loaded
4095 * into adapter's tx dma buffer,
4096 * 0 otherwise
4097 */
4098static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4099{
4100 int ret = 0;
4101
4102 if ( info->tx_holding_count ) {
4103 /* determine if we have enough tx dma buffers
4104 * to accommodate the next tx frame
4105 */
4106 struct tx_holding_buffer *ptx =
4107 &info->tx_holding_buffers[info->get_tx_holding_index];
4108 int num_free = num_free_tx_dma_buffers(info);
4109 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4110 if ( ptx->buffer_size % DMABUFFERSIZE )
4111 ++num_needed;
4112
4113 if (num_needed <= num_free) {
4114 info->xmit_cnt = ptx->buffer_size;
4115 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4116
4117 --info->tx_holding_count;
4118 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4119 info->get_tx_holding_index=0;
4120
4121 /* restart transmit timer */
4122 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4123
4124 ret = 1;
4125 }
4126 }
4127
4128 return ret;
4129}
4130
4131/*
4132 * save_tx_buffer_request()
4133 *
4134 * attempt to store transmit frame request for later transmission
4135 *
4136 * Arguments:
4137 *
4138 * info pointer to device instance data
4139 * Buffer pointer to buffer containing frame to load
4140 * BufferSize size in bytes of frame in Buffer
4141 *
4142 * Return Value: 1 if able to store, 0 otherwise
4143 */
4144static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4145{
4146 struct tx_holding_buffer *ptx;
4147
4148 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4149 return 0; /* all buffers in use */
4150 }
4151
4152 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4153 ptx->buffer_size = BufferSize;
4154 memcpy( ptx->buffer, Buffer, BufferSize);
4155
4156 ++info->tx_holding_count;
4157 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4158 info->put_tx_holding_index=0;
4159
4160 return 1;
4161}
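
/*
 * Illustrative sketch (not part of the driver): the tx holding buffers above
 * form a simple ring. put_tx_holding_index marks where the next frame is
 * saved, get_tx_holding_index marks the next frame to load, and
 * tx_holding_count is the number of saved frames. A generic advance-with-wrap
 * helper would look like this; the driver open-codes the same arithmetic in
 * the two functions above. The helper name is an example only.
 */
static inline unsigned int mgsl_ring_advance_example(unsigned int index, unsigned int ring_size)
{
	/* step to the next slot, wrapping back to 0 at the end of the ring */
	return (index + 1 >= ring_size) ? 0 : index + 1;
}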
4162
4163static int mgsl_claim_resources(struct mgsl_struct *info)
4164{
4165 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4166 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4167 __FILE__,__LINE__,info->device_name, info->io_base);
4168 return -ENODEV;
4169 }
4170 info->io_addr_requested = 1;
4171
4172 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4173 info->device_name, info ) < 0 ) {
4174 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4175 __FILE__,__LINE__,info->device_name, info->irq_level );
4176 goto errout;
4177 }
4178 info->irq_requested = 1;
4179
4180 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4181 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4182 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4183 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4184 goto errout;
4185 }
4186 info->shared_mem_requested = 1;
4187 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4188 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4189 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4190 goto errout;
4191 }
4192 info->lcr_mem_requested = 1;
4193
4194 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4195 if (!info->memory_base) {
4196 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4197 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4198 goto errout;
4199 }
4200
4201 if ( !mgsl_memory_test(info) ) {
4202 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4203 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4204 goto errout;
4205 }
4206
4207 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4208 if (!info->lcr_base) {
4209 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4210 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4211 goto errout;
4212 }
4213
4214 } else {
4215 /* claim DMA channel */
4216
4217 if (request_dma(info->dma_level,info->device_name) < 0){
4218 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4219 __FILE__,__LINE__,info->device_name, info->dma_level );
4220 mgsl_release_resources( info );
4221 return -ENODEV;
4222 }
4223 info->dma_requested = 1;
4224
4225 /* ISA adapter uses bus master DMA */
4226 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4227 enable_dma(info->dma_level);
4228 }
4229
4230 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4231 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4232 __FILE__,__LINE__,info->device_name, info->dma_level );
4233 goto errout;
4234 }
4235
4236 return 0;
4237errout:
4238 mgsl_release_resources(info);
4239 return -ENODEV;
4240
4241} /* end of mgsl_claim_resources() */
4242
4243static void mgsl_release_resources(struct mgsl_struct *info)
4244{
4245 if ( debug_level >= DEBUG_LEVEL_INFO )
4246 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4247 __FILE__,__LINE__,info->device_name );
4248
4249 if ( info->irq_requested ) {
4250 free_irq(info->irq_level, info);
4251 info->irq_requested = 0;
4252 }
4253 if ( info->dma_requested ) {
4254 disable_dma(info->dma_level);
4255 free_dma(info->dma_level);
4256 info->dma_requested = 0;
4257 }
4258 mgsl_free_dma_buffers(info);
4259 mgsl_free_intermediate_rxbuffer_memory(info);
4260 mgsl_free_intermediate_txbuffer_memory(info);
4261
4262 if ( info->io_addr_requested ) {
4263 release_region(info->io_base,info->io_addr_size);
4264 info->io_addr_requested = 0;
4265 }
4266 if ( info->shared_mem_requested ) {
4267 release_mem_region(info->phys_memory_base,0x40000);
4268 info->shared_mem_requested = 0;
4269 }
4270 if ( info->lcr_mem_requested ) {
4271 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4272 info->lcr_mem_requested = 0;
4273 }
4274 if (info->memory_base){
4275 iounmap(info->memory_base);
4276 info->memory_base = NULL;
4277 }
4278 if (info->lcr_base){
4279 iounmap(info->lcr_base - info->lcr_offset);
4280 info->lcr_base = NULL;
4281 }
4282
4283 if ( debug_level >= DEBUG_LEVEL_INFO )
4284 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4285 __FILE__,__LINE__,info->device_name );
4286
4287} /* end of mgsl_release_resources() */
4288
4289/* mgsl_add_device()
4290 *
4291 * Add the specified device instance data structure to the
4292 * global linked list of devices and increment the device count.
4293 *
4294 * Arguments: info pointer to device instance data
4295 * Return Value: None
4296 */
4297static void mgsl_add_device( struct mgsl_struct *info )
4298{
4299 info->next_device = NULL;
4300 info->line = mgsl_device_count;
4301 sprintf(info->device_name,"ttySL%d",info->line);
4302
4303 if (info->line < MAX_TOTAL_DEVICES) {
4304 if (maxframe[info->line])
4305 info->max_frame_size = maxframe[info->line];
4306 info->dosyncppp = dosyncppp[info->line];
4307
4308 if (txdmabufs[info->line]) {
4309 info->num_tx_dma_buffers = txdmabufs[info->line];
4310 if (info->num_tx_dma_buffers < 1)
4311 info->num_tx_dma_buffers = 1;
4312 }
4313
4314 if (txholdbufs[info->line]) {
4315 info->num_tx_holding_buffers = txholdbufs[info->line];
4316 if (info->num_tx_holding_buffers < 1)
4317 info->num_tx_holding_buffers = 1;
4318 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4319 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4320 }
4321 }
4322
4323 mgsl_device_count++;
4324
4325 if ( !mgsl_device_list )
4326 mgsl_device_list = info;
4327 else {
4328 struct mgsl_struct *current_dev = mgsl_device_list;
4329 while( current_dev->next_device )
4330 current_dev = current_dev->next_device;
4331 current_dev->next_device = info;
4332 }
4333
4334 if ( info->max_frame_size < 4096 )
4335 info->max_frame_size = 4096;
4336 else if ( info->max_frame_size > 65535 )
4337 info->max_frame_size = 65535;
4338
4339 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4340 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4341 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4342 info->phys_memory_base, info->phys_lcr_base,
4343 info->max_frame_size );
4344 } else {
4345 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4346 info->device_name, info->io_base, info->irq_level, info->dma_level,
4347 info->max_frame_size );
4348 }
4349
4350#ifdef CONFIG_HDLC
4351 hdlcdev_init(info);
4352#endif
4353
4354} /* end of mgsl_add_device() */
4355
4356/* mgsl_allocate_device()
4357 *
4358 * Allocate and initialize a device instance structure
4359 *
4360 * Arguments: none
4361 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4362 */
4363static struct mgsl_struct* mgsl_allocate_device(void)
4364{
4365 struct mgsl_struct *info;
4366
4367 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4368 GFP_KERNEL);
4369
4370 if (!info) {
4371 printk("Error can't allocate device instance data\n");
4372 } else {
4373 memset(info, 0, sizeof(struct mgsl_struct));
4374 info->magic = MGSL_MAGIC;
4375 INIT_WORK(&info->task, mgsl_bh_handler, info);
4376 info->max_frame_size = 4096;
4377 info->close_delay = 5*HZ/10;
4378 info->closing_wait = 30*HZ;
4379 init_waitqueue_head(&info->open_wait);
4380 init_waitqueue_head(&info->close_wait);
4381 init_waitqueue_head(&info->status_event_wait_q);
4382 init_waitqueue_head(&info->event_wait_q);
4383 spin_lock_init(&info->irq_spinlock);
4384 spin_lock_init(&info->netlock);
4385 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4386 info->idle_mode = HDLC_TXIDLE_FLAGS;
4387 info->num_tx_dma_buffers = 1;
4388 info->num_tx_holding_buffers = 0;
4389 }
4390
4391 return info;
4392
4393} /* end of mgsl_allocate_device()*/
4394
4395static struct tty_operations mgsl_ops = {
4396 .open = mgsl_open,
4397 .close = mgsl_close,
4398 .write = mgsl_write,
4399 .put_char = mgsl_put_char,
4400 .flush_chars = mgsl_flush_chars,
4401 .write_room = mgsl_write_room,
4402 .chars_in_buffer = mgsl_chars_in_buffer,
4403 .flush_buffer = mgsl_flush_buffer,
4404 .ioctl = mgsl_ioctl,
4405 .throttle = mgsl_throttle,
4406 .unthrottle = mgsl_unthrottle,
4407 .send_xchar = mgsl_send_xchar,
4408 .break_ctl = mgsl_break,
4409 .wait_until_sent = mgsl_wait_until_sent,
4410 .read_proc = mgsl_read_proc,
4411 .set_termios = mgsl_set_termios,
4412 .stop = mgsl_stop,
4413 .start = mgsl_start,
4414 .hangup = mgsl_hangup,
4415 .tiocmget = tiocmget,
4416 .tiocmset = tiocmset,
4417};
4418
4419/*
4420 * perform tty device initialization
4421 */
4422static int mgsl_init_tty(void)
4423{
4424 int rc;
4425
4426 serial_driver = alloc_tty_driver(128);
4427 if (!serial_driver)
4428 return -ENOMEM;
4429
4430 serial_driver->owner = THIS_MODULE;
4431 serial_driver->driver_name = "synclink";
4432 serial_driver->name = "ttySL";
4433 serial_driver->major = ttymajor;
4434 serial_driver->minor_start = 64;
4435 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4436 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4437 serial_driver->init_termios = tty_std_termios;
4438 serial_driver->init_termios.c_cflag =
4439 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4440 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4441 tty_set_operations(serial_driver, &mgsl_ops);
4442 if ((rc = tty_register_driver(serial_driver)) < 0) {
4443 printk("%s(%d):Couldn't register serial driver\n",
4444 __FILE__,__LINE__);
4445 put_tty_driver(serial_driver);
4446 serial_driver = NULL;
4447 return rc;
4448 }
4449
4450 printk("%s %s, tty major#%d\n",
4451 driver_name, driver_version,
4452 serial_driver->major);
4453 return 0;
4454}
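
/*
 * Note on device naming (illustrative): the driver registered above uses the
 * name "ttySL" with minor_start 64, so the first port is ttySL0 at minor 64
 * of whatever major number was assigned or configured via ttymajor. A manual
 * (non-udev) node could be created along the lines of
 *
 *	mknod /dev/ttySL0 c <major> 64
 *
 * where <major> is the number reported in the registration printk above.
 */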
4455
4456/* enumerate user specified ISA adapters
4457 */
4458static void mgsl_enum_isa_devices(void)
4459{
4460 struct mgsl_struct *info;
4461 int i;
4462
4463 /* Check for user specified ISA devices */
4464
4465 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4466 if ( debug_level >= DEBUG_LEVEL_INFO )
4467 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4468 io[i], irq[i], dma[i] );
4469
4470 info = mgsl_allocate_device();
4471 if ( !info ) {
4472 /* error allocating device instance data */
4473 if ( debug_level >= DEBUG_LEVEL_ERROR )
4474 printk( "can't allocate device instance data.\n");
4475 continue;
4476 }
4477
4478 /* Copy user configuration info to device instance data */
4479 info->io_base = (unsigned int)io[i];
4480 info->irq_level = (unsigned int)irq[i];
4481 info->irq_level = irq_canonicalize(info->irq_level);
4482 info->dma_level = (unsigned int)dma[i];
4483 info->bus_type = MGSL_BUS_TYPE_ISA;
4484 info->io_addr_size = 16;
4485 info->irq_flags = 0;
4486
4487 mgsl_add_device( info );
4488 }
4489}
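
/*
 * Illustrative usage (parameter values are examples only): ISA adapters are
 * described with the io/irq/dma arrays consumed by mgsl_enum_isa_devices()
 * above, and per-line limits such as maxframe can be supplied the same way,
 * for example
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096
 *
 * PCI adapters need no parameters; they are discovered when synclink_init()
 * calls pci_register_driver().
 */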
4490
4491static void synclink_cleanup(void)
4492{
4493 int rc;
4494 struct mgsl_struct *info;
4495 struct mgsl_struct *tmp;
4496
4497 printk("Unloading %s: %s\n", driver_name, driver_version);
4498
4499 if (serial_driver) {
4500 if ((rc = tty_unregister_driver(serial_driver)))
4501 printk("%s(%d) failed to unregister tty driver err=%d\n",
4502 __FILE__,__LINE__,rc);
4503 put_tty_driver(serial_driver);
4504 }
4505
4506 info = mgsl_device_list;
4507 while(info) {
4508#ifdef CONFIG_HDLC
4509 hdlcdev_exit(info);
4510#endif
4511 mgsl_release_resources(info);
4512 tmp = info;
4513 info = info->next_device;
4514 kfree(tmp);
4515 }
4516
4517 if (tmp_buf) {
4518 free_page((unsigned long) tmp_buf);
4519 tmp_buf = NULL;
4520 }
4521
4522 if (pci_registered)
4523 pci_unregister_driver(&synclink_pci_driver);
4524}
4525
4526static int __init synclink_init(void)
4527{
4528 int rc;
4529
4530 if (break_on_load) {
4531 mgsl_get_text_ptr();
4532 BREAKPOINT();
4533 }
4534
4535 printk("%s %s\n", driver_name, driver_version);
4536
4537 mgsl_enum_isa_devices();
4538 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4539 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4540 else
4541 pci_registered = 1;
4542
4543 if ((rc = mgsl_init_tty()) < 0)
4544 goto error;
4545
4546 return 0;
4547
4548error:
4549 synclink_cleanup();
4550 return rc;
4551}
4552
4553static void __exit synclink_exit(void)
4554{
4555 synclink_cleanup();
4556}
4557
4558module_init(synclink_init);
4559module_exit(synclink_exit);
4560
4561/*
4562 * usc_RTCmd()
4563 *
4564 * Issue a USC Receive/Transmit command to the
4565 * Channel Command/Address Register (CCAR).
4566 *
4567 * Notes:
4568 *
4569 * The command is encoded in the most significant 5 bits <15..11>
4570 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4571 * and Bits <6..0> must be written as zeros.
4572 *
4573 * Arguments:
4574 *
4575 * info pointer to device information structure
4576 * Cmd command mask (use symbolic macros)
4577 *
4578 * Return Value:
4579 *
4580 * None
4581 */
4582static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4583{
4584 /* output command to CCAR in bits <15..11> */
4585 /* preserve bits <10..7>, bits <6..0> must be zero */
4586
4587 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4588
4589 /* Read to flush write to CCAR */
4590 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4591 inw( info->io_base + CCAR );
4592
4593} /* end of usc_RTCmd() */
4594
4595/*
4596 * usc_DmaCmd()
4597 *
4598 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4599 *
4600 * Arguments:
4601 *
4602 * info pointer to device information structure
4603 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4604 *
4605 * Return Value:
4606 *
4607 * None
4608 */
4609static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4610{
4611 /* write command mask to DCAR */
4612 outw( Cmd + info->mbre_bit, info->io_base );
4613
4614 /* Read to flush write to DCAR */
4615 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4616 inw( info->io_base );
4617
4618} /* end of usc_DmaCmd() */
4619
4620/*
4621 * usc_OutDmaReg()
4622 *
4623 * Write a 16-bit value to a USC DMA register
4624 *
4625 * Arguments:
4626 *
4627 * info pointer to device info structure
4628 * RegAddr register address (number) for write
4629 * RegValue 16-bit value to write to register
4630 *
4631 * Return Value:
4632 *
4633 * None
4634 *
4635 */
4636static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4637{
4638 /* Note: The DCAR is located at the adapter base address */
4639 /* Note: must preserve state of BIT8 in DCAR */
4640
4641 outw( RegAddr + info->mbre_bit, info->io_base );
4642 outw( RegValue, info->io_base );
4643
4644 /* Read to flush write to DCAR */
4645 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4646 inw( info->io_base );
4647
4648} /* end of usc_OutDmaReg() */
4649
4650/*
4651 * usc_InDmaReg()
4652 *
4653 * Read a 16-bit value from a DMA register
4654 *
4655 * Arguments:
4656 *
4657 * info pointer to device info structure
4658 * RegAddr register address (number) to read from
4659 *
4660 * Return Value:
4661 *
4662 * The 16-bit value read from register
4663 *
4664 */
4665static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4666{
4667 /* Note: The DCAR is located at the adapter base address */
4668 /* Note: must preserve state of BIT8 in DCAR */
4669
4670 outw( RegAddr + info->mbre_bit, info->io_base );
4671 return inw( info->io_base );
4672
4673} /* end of usc_InDmaReg() */
4674
4675/*
4676 *
4677 * usc_OutReg()
4678 *
4679 * Write a 16-bit value to a USC serial channel register
4680 *
4681 * Arguments:
4682 *
4683 * info pointer to device info structure
4684 * RegAddr register address (number) to write to
4685 * RegValue 16-bit value to write to register
4686 *
4687 * Return Value:
4688 *
4689 * None
4690 *
4691 */
4692static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4693{
4694 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4695 outw( RegValue, info->io_base + CCAR );
4696
4697 /* Read to flush write to CCAR */
4698 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4699 inw( info->io_base + CCAR );
4700
4701} /* end of usc_OutReg() */
4702
4703/*
4704 * usc_InReg()
4705 *
4706 * Reads a 16-bit value from a USC serial channel register
4707 *
4708 * Arguments:
4709 *
4710 * info pointer to device extension
4711 * RegAddr register address (number) to read from
4712 *
4713 * Return Value:
4714 *
4715 * 16-bit value read from register
4716 */
4717static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4718{
4719 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4720 return inw( info->io_base + CCAR );
4721
4722} /* end of usc_InReg() */
4723
4724/* usc_set_sdlc_mode()
4725 *
4726 * Set up the adapter for SDLC DMA communications.
4727 *
4728 * Arguments: info pointer to device instance data
4729 * Return Value: NONE
4730 */
4731static void usc_set_sdlc_mode( struct mgsl_struct *info )
4732{
4733 u16 RegValue;
4734 int PreSL1660;
4735
4736 /*
4737 * determine if the IUSC on the adapter is pre-SL1660. If
4738 * not, take advantage of the UnderWait feature of more
4739 * modern chips. If an underrun occurs and this bit is set,
4740 * the transmitter will idle the programmed idle pattern
4741 * until the driver has time to service the underrun. Otherwise,
4742 * the dma controller may get the cycles previously requested
4743 * and begin transmitting queued tx data.
4744 */
4745 usc_OutReg(info,TMCR,0x1f);
4746 RegValue=usc_InReg(info,TMDR);
4747 if ( RegValue == IUSC_PRE_SL1660 )
4748 PreSL1660 = 1;
4749 else
4750 PreSL1660 = 0;
4751
4752
4753 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4754 {
4755 /*
4756 ** Channel Mode Register (CMR)
4757 **
4758 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4759 ** <13> 0 0 = Transmit Disabled (initially)
4760 ** <12> 0 1 = Consecutive Idles share common 0
4761 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4762 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4763 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4764 **
4765 ** 1000 1110 0000 0110 = 0x8e06
4766 */
4767 RegValue = 0x8e06;
4768
4769 /*--------------------------------------------------
4770 * ignore user options for UnderRun Actions and
4771 * preambles
4772 *--------------------------------------------------*/
4773 }
4774 else
4775 {
4776 /* Channel mode Register (CMR)
4777 *
4778 * <15..14> 00 Tx Sub modes, Underrun Action
4779 * <13> 0 1 = Send Preamble before opening flag
4780 * <12> 0 1 = Consecutive Idles share common 0
4781 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4782 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4783 * <3..0> 0110 Receiver mode = HDLC/SDLC
4784 *
4785 * 0000 0110 0000 0110 = 0x0606
4786 */
4787 if (info->params.mode == MGSL_MODE_RAW) {
4788 RegValue = 0x0001; /* Set Receive mode = external sync */
4789
4790 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4791 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4792
4793 /*
4794 * TxSubMode:
4795 * CMR <15> 0 Don't send CRC on Tx Underrun
4796 * CMR <14> x undefined
4797 * CMR <13> 0 Send preamble before opening sync
4798 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4799 *
4800 * TxMode:
4801 * CMR <11..8> 0100 MonoSync
4802 *
4803 * 0x00 0100 xxxx xxxx 04xx
4804 */
4805 RegValue |= 0x0400;
4806 }
4807 else {
4808
4809 RegValue = 0x0606;
4810
4811 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4812 RegValue |= BIT14;
4813 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4814 RegValue |= BIT15;
4815 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4816 RegValue |= BIT15 + BIT14;
4817 }
4818
4819 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4820 RegValue |= BIT13;
4821 }
4822
4823 if ( info->params.mode == MGSL_MODE_HDLC &&
4824 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4825 RegValue |= BIT12;
4826
4827 if ( info->params.addr_filter != 0xff )
4828 {
4829 /* set up receive address filtering */
4830 usc_OutReg( info, RSR, info->params.addr_filter );
4831 RegValue |= BIT4;
4832 }
4833
4834 usc_OutReg( info, CMR, RegValue );
4835 info->cmr_value = RegValue;
4836
4837 /* Receiver mode Register (RMR)
4838 *
4839 * <15..13> 000 encoding
4840 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4841 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4842 * <9> 0 1 = Include Receive chars in CRC
4843 * <8> 1 1 = Use Abort/PE bit as abort indicator
4844 * <7..6> 00 Even parity
4845 * <5> 0 parity disabled
4846 * <4..2> 000 Receive Char Length = 8 bits
4847 * <1..0> 00 Disable Receiver
4848 *
4849 * 0000 0101 0000 0000 = 0x0500
4850 */
4851
4852 RegValue = 0x0500;
4853
4854 switch ( info->params.encoding ) {
4855 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4856 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4857 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4858 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4859 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4860 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4861 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4862 }
4863
4864 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4865 RegValue |= BIT9;
4866 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4867 RegValue |= ( BIT12 | BIT10 | BIT9 );
4868
4869 usc_OutReg( info, RMR, RegValue );
4870
4871 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4872 /* When an opening flag of an SDLC frame is recognized the */
4873 /* Receive Character count (RCC) is loaded with the value in */
4874 /* RCLR. The RCC is decremented for each received byte. The */
4875 /* value of RCC is stored after the closing flag of the frame */
4876 /* allowing the frame size to be computed. */
4877
4878 usc_OutReg( info, RCLR, RCLRVALUE );
4879
4880 usc_RCmd( info, RCmd_SelectRicrdma_level );
4881
4882 /* Receive Interrupt Control Register (RICR)
4883 *
4884 * <15..8> ? RxFIFO DMA Request Level
4885 * <7> 0 Exited Hunt IA (Interrupt Arm)
4886 * <6> 0 Idle Received IA
4887 * <5> 0 Break/Abort IA
4888 * <4> 0 Rx Bound IA
4889 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4890 * <2> 0 Abort/PE IA
4891 * <1> 1 Rx Overrun IA
4892 * <0> 0 Select TC0 value for readback
4893 *
4894 * 0000 0000 0000 1010 = 0x000a
4895 */
4896
4897 /* Carry over the Exit Hunt and Idle Received bits */
4898 /* in case they have been armed by usc_ArmEvents. */
4899
4900 RegValue = usc_InReg( info, RICR ) & 0xc0;
4901
4902 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4903 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4904 else
4905 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4906
4907 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4908
4909 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4910 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4911
4912 /* Transmit mode Register (TMR)
4913 *
4914 * <15..13> 000 encoding
4915 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4916 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4917 * <9> 0 1 = Tx CRC Enabled
4918 * <8> 0 1 = Append CRC to end of transmit frame
4919 * <7..6> 00 Transmit parity Even
4920 * <5> 0 Transmit parity Disabled
4921 * <4..2> 000 Tx Char Length = 8 bits
4922 * <1..0> 00 Disable Transmitter
4923 *
4924 * 0000 0100 0000 0000 = 0x0400
4925 */
4926
4927 RegValue = 0x0400;
4928
4929 switch ( info->params.encoding ) {
4930 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4931 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4932 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4933 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4934 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4935 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4936 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4937 }
4938
4939 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4940 RegValue |= BIT9 + BIT8;
4941 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4942 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4943
4944 usc_OutReg( info, TMR, RegValue );
4945
4946 usc_set_txidle( info );
4947
4948
4949 usc_TCmd( info, TCmd_SelectTicrdma_level );
4950
4951 /* Transmit Interrupt Control Register (TICR)
4952 *
4953 * <15..8> ? Transmit FIFO DMA Level
4954 * <7> 0 Present IA (Interrupt Arm)
4955 * <6> 0 Idle Sent IA
4956 * <5> 1 Abort Sent IA
4957 * <4> 1 EOF/EOM Sent IA
4958 * <3> 0 CRC Sent IA
4959 * <2> 1 1 = Wait for SW Trigger to Start Frame
4960 * <1> 1 Tx Underrun IA
4961 * <0> 0 TC0 constant on read back
4962 *
4963 * 0000 0000 0011 0110 = 0x0036
4964 */
4965
4966 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4967 usc_OutReg( info, TICR, 0x0736 );
4968 else
4969 usc_OutReg( info, TICR, 0x1436 );
4970
4971 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4972 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4973
4974 /*
4975 ** Transmit Command/Status Register (TCSR)
4976 **
4977 ** <15..12> 0000 TCmd
4978 ** <11> 0/1 UnderWait
4979 ** <10..08> 000 TxIdle
4980 ** <7> x PreSent
4981 ** <6> x IdleSent
4982 ** <5> x AbortSent
4983 ** <4> x EOF/EOM Sent
4984 ** <3> x CRC Sent
4985 ** <2> x All Sent
4986 ** <1> x TxUnder
4987 ** <0> x TxEmpty
4988 **
4989 ** 0000 0000 0000 0000 = 0x0000
4990 */
4991 info->tcsr_value = 0;
4992
4993 if ( !PreSL1660 )
4994 info->tcsr_value |= TCSR_UNDERWAIT;
4995
4996 usc_OutReg( info, TCSR, info->tcsr_value );
4997
4998 /* Clock mode Control Register (CMCR)
4999 *
5000 * <15..14> 00 counter 1 Source = Disabled
5001 * <13..12> 00 counter 0 Source = Disabled
5002 * <11..10> 11 BRG1 Input is TxC Pin
5003 * <9..8> 11 BRG0 Input is TxC Pin
5004 * <7..6> 01 DPLL Input is BRG1 Output
5005 * <5..3> XXX TxCLK comes from Port 0
5006 * <2..0> XXX RxCLK comes from Port 1
5007 *
5008 * 0000 1111 0111 0111 = 0x0f77
5009 */
5010
5011 RegValue = 0x0f40;
5012
5013 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
5014 RegValue |= 0x0003; /* RxCLK from DPLL */
5015 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
5016 RegValue |= 0x0004; /* RxCLK from BRG0 */
5017 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
5018 RegValue |= 0x0006; /* RxCLK from TXC Input */
5019 else
5020 RegValue |= 0x0007; /* RxCLK from Port1 */
5021
5022 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
5023 RegValue |= 0x0018; /* TxCLK from DPLL */
5024 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
5025 RegValue |= 0x0020; /* TxCLK from BRG0 */
5026 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
5027 RegValue |= 0x0038; /* RxCLK from TXC Input */
5028 else
5029 RegValue |= 0x0030; /* TxCLK from Port0 */
5030
5031 usc_OutReg( info, CMCR, RegValue );
5032
5033
5034 /* Hardware Configuration Register (HCR)
5035 *
5036 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5037 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5038 * <12> 0 CVOK:0=report code violation in biphase
5039 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5040 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5041 * <7..6> 00 reserved
5042 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5043 * <4> X BRG1 Enable
5044 * <3..2> 00 reserved
5045 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5046 * <0> 0 BRG0 Enable
5047 */
5048
5049 RegValue = 0x0000;
5050
5051 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5052 u32 XtalSpeed;
5053 u32 DpllDivisor;
5054 u16 Tc;
5055
5056 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5057 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5058
5059 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5060 XtalSpeed = 11059200;
5061 else
5062 XtalSpeed = 14745600;
5063
5064 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5065 DpllDivisor = 16;
5066 RegValue |= BIT10;
5067 }
5068 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5069 DpllDivisor = 8;
5070 RegValue |= BIT11;
5071 }
5072 else
5073 DpllDivisor = 32;
5074
5075 /* Tc = (Xtal/Speed) - 1 */
5076 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5077 /* then rounding up gives a more precise time constant. Instead */
5078 /* of rounding up and then subtracting 1 we just don't subtract */
5079 /* the one in this case. */
5080
5081 /*--------------------------------------------------
5082 * ejz: for DPLL mode, application should use the
5083 * same clock speed as the partner system, even
5084 * though clocking is derived from the input RxData.
5085 * In case the user uses a 0 for the clock speed,
5086 * default to 0xffffffff and don't try to divide by
5087 * zero
5088 *--------------------------------------------------*/
5089 if ( info->params.clock_speed )
5090 {
5091 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5092 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5093 / info->params.clock_speed) )
5094 Tc--;
5095 }
5096 else
5097 Tc = -1;
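		/*
		 * Worked example (illustrative): ISA adapters use the 14.7456 MHz
		 * crystal, so with DpllDivisor = 16 the reference clock is
		 * 14745600/16 = 921600. For clock_speed = 9600 this divides
		 * exactly (921600/9600 = 96), the doubled-remainder test above
		 * is 0, and Tc becomes 96 - 1 = 95.
		 */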
5098
5099
5100 /* Write 16-bit Time Constant for BRG1 */
5101 usc_OutReg( info, TC1R, Tc );
5102
5103 RegValue |= BIT4; /* enable BRG1 */
5104
5105 switch ( info->params.encoding ) {
5106 case HDLC_ENCODING_NRZ:
5107 case HDLC_ENCODING_NRZB:
5108 case HDLC_ENCODING_NRZI_MARK:
5109 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5110 case HDLC_ENCODING_BIPHASE_MARK:
5111 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5112 case HDLC_ENCODING_BIPHASE_LEVEL:
5113 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5114 }
5115 }
5116
5117 usc_OutReg( info, HCR, RegValue );
5118
5119
5120 /* Channel Control/status Register (CCSR)
5121 *
5122 * <15> X RCC FIFO Overflow status (RO)
5123 * <14> X RCC FIFO Not Empty status (RO)
5124 * <13> 0 1 = Clear RCC FIFO (WO)
5125 * <12> X DPLL Sync (RW)
5126 * <11> X DPLL 2 Missed Clocks status (RO)
5127 * <10> X DPLL 1 Missed Clock status (RO)
5128 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5129 * <7> X SDLC Loop On status (RO)
5130 * <6> X SDLC Loop Send status (RO)
5131 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5132 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5133 * <1..0> 00 reserved
5134 *
5135 * 0000 0000 0010 0000 = 0x0020
5136 */
5137
5138 usc_OutReg( info, CCSR, 0x1020 );
5139
5140
5141 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5142 usc_OutReg( info, SICR,
5143 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5144 }
5145
5146
5147 /* enable Master Interrupt Enable bit (MIE) */
5148 usc_EnableMasterIrqBit( info );
5149
5150 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5151 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5152
5153 /* arm RCC underflow interrupt */
5154 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5155 usc_EnableInterrupts(info, MISC);
5156
5157 info->mbre_bit = 0;
5158 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5159 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5160 info->mbre_bit = BIT8;
5161 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5162
5163 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5164 /* Enable DMAEN (Port 7, Bit 14) */
5165 /* This connects the DMA request signal to the ISA bus */
5166 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5167 }
5168
5169 /* DMA Control Register (DCR)
5170 *
5171 * <15..14> 10 Priority mode = Alternating Tx/Rx
5172 * 01 Rx has priority
5173 * 00 Tx has priority
5174 *
5175 * <13> 1 Enable Priority Preempt per DCR<15..14>
5176 * (WARNING DCR<11..10> must be 00 when this is 1)
5177 * 0 Choose activate channel per DCR<11..10>
5178 *
5179 * <12> 0 Little Endian for Array/List
5180 * <11..10> 00 Both Channels can use each bus grant
5181 * <9..6> 0000 reserved
5182 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5183 * <4> 0 1 = drive D/C and S/D pins
5184 * <3> 1 1 = Add one wait state to all DMA cycles.
5185 * <2> 0 1 = Strobe /UAS on every transfer.
5186 * <1..0> 11 Addr incrementing only affects LS24 bits
5187 *
5188 * 0110 0000 0000 1011 = 0x600b
5189 */
5190
5191 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5192 /* PCI adapter does not need DMA wait state */
5193 usc_OutDmaReg( info, DCR, 0xa00b );
5194 }
5195 else
5196 usc_OutDmaReg( info, DCR, 0x800b );
5197
5198
5199 /* Receive DMA mode Register (RDMR)
5200 *
5201 * <15..14> 11 DMA mode = Linked List Buffer mode
5202 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5203 * <12> 1 Clear count of List Entry after fetching
5204 * <11..10> 00 Address mode = Increment
5205 * <9> 1 Terminate Buffer on RxBound
5206 * <8> 0 Bus Width = 16bits
5207 * <7..0> ? status Bits (write as 0s)
5208 *
5209 * 1111 0010 0000 0000 = 0xf200
5210 */
5211
5212 usc_OutDmaReg( info, RDMR, 0xf200 );
5213
5214
5215 /* Transmit DMA mode Register (TDMR)
5216 *
5217 * <15..14> 11 DMA mode = Linked List Buffer mode
5218 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5219 * <12> 1 Clear count of List Entry after fetching
5220 * <11..10> 00 Address mode = Increment
5221 * <9> 1 Terminate Buffer on end of frame
5222 * <8> 0 Bus Width = 16bits
5223 * <7..0> ? status Bits (Read Only so write as 0)
5224 *
5225 * 1111 0010 0000 0000 = 0xf200
5226 */
5227
5228 usc_OutDmaReg( info, TDMR, 0xf200 );
5229
5230
5231 /* DMA Interrupt Control Register (DICR)
5232 *
5233 * <15> 1 DMA Interrupt Enable
5234 * <14> 0 1 = Disable IEO from USC
5235 * <13> 0 1 = Don't provide vector during IntAck
5236 * <12> 1 1 = Include status in Vector
5237 * <10..2> 0 reserved, Must be 0s
5238 * <1> 0 1 = Rx DMA Interrupt Enabled
5239 * <0> 0 1 = Tx DMA Interrupt Enabled
5240 *
5241 * 1001 0000 0000 0000 = 0x9000
5242 */
5243
5244 usc_OutDmaReg( info, DICR, 0x9000 );
5245
5246 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5247 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5248 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5249
5250 /* Channel Control Register (CCR)
5251 *
5252 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5253 * <13> 0 Trigger Tx on SW Command Disabled
5254 * <12> 0 Flag Preamble Disabled
5255 * <11..10> 00 Preamble Length
5256 * <9..8> 00 Preamble Pattern
5257 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5258 * <5> 0 Trigger Rx on SW Command Disabled
5259 * <4..0> 0 reserved
5260 *
5261 * 1000 0000 1000 0000 = 0x8080
5262 */
5263
5264 RegValue = 0x8080;
5265
5266 switch ( info->params.preamble_length ) {
5267 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5268 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5269 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5270 }
5271
5272 switch ( info->params.preamble ) {
5273 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5274 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5275 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5276 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5277 }
5278
5279 usc_OutReg( info, CCR, RegValue );
5280
5281
5282 /*
5283 * Burst/Dwell Control Register
5284 *
5285 * <15..8> 0x20 Maximum number of transfers per bus grant
5286 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5287 */
5288
5289 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5290 /* don't limit bus occupancy on PCI adapter */
5291 usc_OutDmaReg( info, BDCR, 0x0000 );
5292 }
5293 else
5294 usc_OutDmaReg( info, BDCR, 0x2000 );
5295
5296 usc_stop_transmitter(info);
5297 usc_stop_receiver(info);
5298
5299} /* end of usc_set_sdlc_mode() */
5300
5301/* usc_enable_loopback()
5302 *
5303 * Set the 16C32 for internal loopback mode.
5304 * The TxCLK and RxCLK signals are generated from the BRG0 and
5305 * the TxD is looped back to the RxD internally.
5306 *
5307 * Arguments: info pointer to device instance data
5308 * enable 1 = enable loopback, 0 = disable
5309 * Return Value: None
5310 */
5311static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5312{
5313 if (enable) {
5314 /* blank external TXD output */
5315 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5316
5317 /* Clock mode Control Register (CMCR)
5318 *
5319 * <15..14> 00 counter 1 Disabled
5320 * <13..12> 00 counter 0 Disabled
5321 * <11..10> 11 BRG1 Input is TxC Pin
5322 * <9..8> 11 BRG0 Input is TxC Pin
5323 * <7..6> 01 DPLL Input is BRG1 Output
5324 * <5..3> 100 TxCLK comes from BRG0
5325 * <2..0> 100 RxCLK comes from BRG0
5326 *
5327 * 0000 1111 0110 0100 = 0x0f64
5328 */
5329
5330 usc_OutReg( info, CMCR, 0x0f64 );
5331
5332 /* Write 16-bit Time Constant for BRG0 */
5333 /* use clock speed if available, otherwise use 8 for diagnostics */
5334 if (info->params.clock_speed) {
5335 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5336 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5337 else
5338 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5339 } else
5340 usc_OutReg(info, TC0R, (u16)8);
5341
5342 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5343 mode = Continuous Set Bit 0 to enable BRG0. */
5344 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5345
5346 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5347 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5348
5349 /* set Internal Data loopback mode */
5350 info->loopback_bits = 0x300;
5351 outw( 0x0300, info->io_base + CCAR );
5352 } else {
5353 /* enable external TXD output */
5354 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5355
5356 /* clear Internal Data loopback mode */
5357 info->loopback_bits = 0;
5358 outw( 0,info->io_base + CCAR );
5359 }
5360
5361} /* end of usc_enable_loopback() */
5362
5363/* usc_enable_aux_clock()
5364 *
5365 * Enable the AUX clock output at the specified frequency.
5366 *
5367 * Arguments:
5368 *
5369 * info pointer to device extension
5370 * data_rate data rate of clock in bits per second
5371 * A data rate of 0 disables the AUX clock.
5372 *
5373 * Return Value: None
5374 */
5375static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5376{
5377 u32 XtalSpeed;
5378 u16 Tc;
5379
5380 if ( data_rate ) {
5381 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5382 XtalSpeed = 11059200;
5383 else
5384 XtalSpeed = 14745600;
5385
5386
5387 /* Tc = (Xtal/Speed) - 1 */
5388 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5389 /* then rounding up gives a more precise time constant. Instead */
5390 /* of rounding up and then subtracting 1 we just don't subtract */
5391 /* the one in this case. */
5392
5393
5394 Tc = (u16)(XtalSpeed/data_rate);
5395 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5396 Tc--;
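	/* Worked example (illustrative rates): with the 14745600 Hz ISA
	 * crystal and clock_speed = 38400, XtalSpeed/data_rate = 384 with
	 * remainder 0, so Tc is decremented to 383.  With clock_speed =
	 * 1000000 the quotient is 14 with remainder 745600; twice the
	 * remainder exceeds the rate, so Tc stays at 14, which equals the
	 * rounded-up divisor minus one.
	 */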
5397
5398 /* Write 16-bit Time Constant for BRG0 */
5399 usc_OutReg( info, TC0R, Tc );
5400
5401 /*
5402 * Hardware Configuration Register (HCR)
5403 * Clear Bit 1, BRG0 mode = Continuous
5404 * Set Bit 0 to enable BRG0.
5405 */
5406
5407 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5408
5409 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5410 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5411 } else {
5412 /* data rate == 0 so turn off BRG0 */
5413 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5414 }
5415
5416} /* end of usc_enable_aux_clock() */
5417
5418/*
5419 *
5420 * usc_process_rxoverrun_sync()
5421 *
5422 * This function processes a receive overrun by resetting the
5423 * receive DMA buffers and issuing a Purge Rx FIFO command
5424 * to allow the receiver to continue receiving.
5425 *
5426 * Arguments:
5427 *
5428 * info pointer to device extension
5429 *
5430 * Return Value: None
5431 */
5432static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5433{
5434 int start_index;
5435 int end_index;
5436 int frame_start_index;
5437 int start_of_frame_found = FALSE;
5438 int end_of_frame_found = FALSE;
5439 int reprogram_dma = FALSE;
5440
5441 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5442 u32 phys_addr;
5443
5444 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5445 usc_RCmd( info, RCmd_EnterHuntmode );
5446 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5447
5448 /* CurrentRxBuffer points to the 1st buffer of the next */
5449 /* possibly available receive frame. */
5450
5451 frame_start_index = start_index = end_index = info->current_rx_buffer;
5452
5453 /* Search for an unfinished string of buffers. This means */
5454 /* that a receive frame started (at least one buffer with */
5455 	/* count set to zero) but there is no terminating buffer          */
5456 /* (status set to non-zero). */
5457
5458 while( !buffer_list[end_index].count )
5459 {
5460 /* Count field has been reset to zero by 16C32. */
5461 /* This buffer is currently in use. */
5462
5463 if ( !start_of_frame_found )
5464 {
5465 start_of_frame_found = TRUE;
5466 frame_start_index = end_index;
5467 end_of_frame_found = FALSE;
5468 }
5469
5470 if ( buffer_list[end_index].status )
5471 {
5472 /* Status field has been set by 16C32. */
5473 /* This is the last buffer of a received frame. */
5474
5475 /* We want to leave the buffers for this frame intact. */
5476 /* Move on to next possible frame. */
5477
5478 start_of_frame_found = FALSE;
5479 end_of_frame_found = TRUE;
5480 }
5481
5482 /* advance to next buffer entry in linked list */
5483 end_index++;
5484 if ( end_index == info->rx_buffer_count )
5485 end_index = 0;
5486
5487 if ( start_index == end_index )
5488 {
5489 /* The entire list has been searched with all Counts == 0 and */
5490 /* all Status == 0. The receive buffers are */
5491 /* completely screwed, reset all receive buffers! */
5492 mgsl_reset_rx_dma_buffers( info );
5493 frame_start_index = 0;
5494 start_of_frame_found = FALSE;
5495 reprogram_dma = TRUE;
5496 break;
5497 }
5498 }
5499
5500 if ( start_of_frame_found && !end_of_frame_found )
5501 {
5502 /* There is an unfinished string of receive DMA buffers */
5503 /* as a result of the receiver overrun. */
5504
5505 /* Reset the buffers for the unfinished frame */
5506 /* and reprogram the receive DMA controller to start */
5507 /* at the 1st buffer of unfinished frame. */
5508
5509 start_index = frame_start_index;
5510
5511 do
5512 {
5513 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5514
5515 /* Adjust index for wrap around. */
5516 if ( start_index == info->rx_buffer_count )
5517 start_index = 0;
5518
5519 } while( start_index != end_index );
5520
5521 reprogram_dma = TRUE;
5522 }
5523
5524 if ( reprogram_dma )
5525 {
5526 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5527 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5528 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5529
5530 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5531
5532 /* This empties the receive FIFO and loads the RCC with RCLR */
5533 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5534
5535 /* program 16C32 with physical address of 1st DMA buffer entry */
5536 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5537 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5538 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5539
5540 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5541 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5542 usc_EnableInterrupts( info, RECEIVE_STATUS );
5543
5544 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5545 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5546
5547 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5548 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5549 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5550 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5551 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5552 else
5553 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5554 }
5555 else
5556 {
5557 /* This empties the receive FIFO and loads the RCC with RCLR */
5558 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5559 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5560 }
5561
5562} /* end of usc_process_rxoverrun_sync() */
5563
5564/* usc_stop_receiver()
5565 *
5566 * Disable USC receiver
5567 *
5568 * Arguments: info pointer to device instance data
5569 * Return Value: None
5570 */
5571static void usc_stop_receiver( struct mgsl_struct *info )
5572{
5573 if (debug_level >= DEBUG_LEVEL_ISR)
5574 printk("%s(%d):usc_stop_receiver(%s)\n",
5575 __FILE__,__LINE__, info->device_name );
5576
5577 /* Disable receive DMA channel. */
5578 /* This also disables receive DMA channel interrupts */
5579 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5580
5581 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5582 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5583 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5584
5585 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5586
5587 /* This empties the receive FIFO and loads the RCC with RCLR */
5588 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5589 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5590
5591 info->rx_enabled = 0;
5592 info->rx_overflow = 0;
5593 info->rx_rcc_underrun = 0;
5594
5595}	/* end of usc_stop_receiver() */
5596
5597/* usc_start_receiver()
5598 *
5599 * Enable the USC receiver
5600 *
5601 * Arguments: info pointer to device instance data
5602 * Return Value: None
5603 */
5604static void usc_start_receiver( struct mgsl_struct *info )
5605{
5606 u32 phys_addr;
5607
5608 if (debug_level >= DEBUG_LEVEL_ISR)
5609 printk("%s(%d):usc_start_receiver(%s)\n",
5610 __FILE__,__LINE__, info->device_name );
5611
5612 mgsl_reset_rx_dma_buffers( info );
5613 usc_stop_receiver( info );
5614
5615 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5616 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5617
5618 if ( info->params.mode == MGSL_MODE_HDLC ||
5619 info->params.mode == MGSL_MODE_RAW ) {
5620 /* DMA mode Transfers */
5621 /* Program the DMA controller. */
5622 /* Enable the DMA controller end of buffer interrupt. */
5623
5624 /* program 16C32 with physical address of 1st DMA buffer entry */
5625 phys_addr = info->rx_buffer_list[0].phys_entry;
5626 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5627 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5628
5629 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5630 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5631 usc_EnableInterrupts( info, RECEIVE_STATUS );
5632
5633 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5634 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5635
5636 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5637 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5638 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5639 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5640 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5641 else
5642 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5643 } else {
5644 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5645 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5646 usc_EnableInterrupts(info, RECEIVE_DATA);
5647
5648 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5649 usc_RCmd( info, RCmd_EnterHuntmode );
5650
5651 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5652 }
5653
5654 usc_OutReg( info, CCSR, 0x1020 );
5655
5656 info->rx_enabled = 1;
5657
5658} /* end of usc_start_receiver() */
5659
5660/* usc_start_transmitter()
5661 *
5662 * Enable the USC transmitter and send a transmit frame if
5663 * one is loaded in the DMA buffers.
5664 *
5665 * Arguments: info pointer to device instance data
5666 * Return Value: None
5667 */
5668static void usc_start_transmitter( struct mgsl_struct *info )
5669{
5670 u32 phys_addr;
5671 unsigned int FrameSize;
5672
5673 if (debug_level >= DEBUG_LEVEL_ISR)
5674 printk("%s(%d):usc_start_transmitter(%s)\n",
5675 __FILE__,__LINE__, info->device_name );
5676
5677 if ( info->xmit_cnt ) {
5678
5679 /* If auto RTS enabled and RTS is inactive, then assert */
5680 /* RTS and set a flag indicating that the driver should */
5681 /* negate RTS when the transmission completes. */
5682
5683 info->drop_rts_on_tx_done = 0;
5684
5685 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5686 usc_get_serial_signals( info );
5687 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5688 info->serial_signals |= SerialSignal_RTS;
5689 usc_set_serial_signals( info );
5690 info->drop_rts_on_tx_done = 1;
5691 }
5692 }
5693
5694
5695 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5696 if ( !info->tx_active ) {
5697 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5698 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5699 usc_EnableInterrupts(info, TRANSMIT_DATA);
5700 usc_load_txfifo(info);
5701 }
5702 } else {
5703 /* Disable transmit DMA controller while programming. */
5704 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5705
5706 /* Transmit DMA buffer is loaded, so program USC */
5707 /* to send the frame contained in the buffers. */
5708
5709 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5710
5711 /* if operating in Raw sync mode, reset the rcc component
5712 * of the tx dma buffer entry, otherwise, the serial controller
5713 * will send a closing sync char after this count.
5714 */
5715 if ( info->params.mode == MGSL_MODE_RAW )
5716 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5717
5718 /* Program the Transmit Character Length Register (TCLR) */
5719 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5720 usc_OutReg( info, TCLR, (u16)FrameSize );
5721
5722 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5723
5724 /* Program the address of the 1st DMA Buffer Entry in linked list */
5725 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5726 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5727 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5728
5729 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5730 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5731 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5732
5733 if ( info->params.mode == MGSL_MODE_RAW &&
5734 info->num_tx_dma_buffers > 1 ) {
5735 /* When running external sync mode, attempt to 'stream' transmit */
5736 /* by filling tx dma buffers as they become available. To do this */
5737 /* we need to enable Tx DMA EOB Status interrupts : */
5738 /* */
5739 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5740 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5741
5742 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5743 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5744 }
5745
5746 /* Initialize Transmit DMA Channel */
5747 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5748
5749 usc_TCmd( info, TCmd_SendFrame );
5750
5751 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5752 add_timer(&info->tx_timer);
5753 }
5754 info->tx_active = 1;
5755 }
5756
5757 if ( !info->tx_enabled ) {
5758 info->tx_enabled = 1;
5759 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5760 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5761 else
5762 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5763 }
5764
5765} /* end of usc_start_transmitter() */
5766
5767/* usc_stop_transmitter()
5768 *
5769 * Stops the transmitter and DMA
5770 *
5771 * Arguments:		info	pointer to device instance data
5772 * Return Value: None
5773 */
5774static void usc_stop_transmitter( struct mgsl_struct *info )
5775{
5776 if (debug_level >= DEBUG_LEVEL_ISR)
5777 printk("%s(%d):usc_stop_transmitter(%s)\n",
5778 __FILE__,__LINE__, info->device_name );
5779
5780 del_timer(&info->tx_timer);
5781
5782 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5783 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5784 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5785
5786 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5787 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5788 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5789
5790 info->tx_enabled = 0;
5791 info->tx_active = 0;
5792
5793} /* end of usc_stop_transmitter() */
5794
5795/* usc_load_txfifo()
5796 *
5797 * Fill the transmit FIFO until the FIFO is full or
5798 * there is no more data to load.
5799 *
5800 * Arguments: info pointer to device extension (instance data)
5801 * Return Value: None
5802 */
5803static void usc_load_txfifo( struct mgsl_struct *info )
5804{
5805 int Fifocount;
5806 u8 TwoBytes[2];
5807
5808 if ( !info->xmit_cnt && !info->x_char )
5809 return;
5810
5811 /* Select transmit FIFO status readback in TICR */
5812 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5813
5814 /* load the Transmit FIFO until FIFOs full or all data sent */
5815
5816 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5817 /* there is more space in the transmit FIFO and */
5818 /* there is more data in transmit buffer */
5819
5820 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5821 /* write a 16-bit word from transmit buffer to 16C32 */
5822
5823 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5824 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5825 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5826 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5827
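		/* On a little endian host (the usual case for these ISA/PCI
		 * adapters) the u16 read of TwoBytes[] below puts TwoBytes[0]
		 * in the low byte, matching the little endian byte ordering
		 * selected for the 16C32 in usc_reset() via
		 * RTCmd_SelectLittleEndian.
		 */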
5828 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5829
5830 info->xmit_cnt -= 2;
5831 info->icount.tx += 2;
5832 } else {
5833 /* only 1 byte left to transmit or 1 FIFO slot left */
5834
5835 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5836 info->io_base + CCAR );
5837
5838 if (info->x_char) {
5839 /* transmit pending high priority char */
5840 outw( info->x_char,info->io_base + CCAR );
5841 info->x_char = 0;
5842 } else {
5843 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5844 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5845 info->xmit_cnt--;
5846 }
5847 info->icount.tx++;
5848 }
5849 }
5850
5851} /* end of usc_load_txfifo() */
5852
5853/* usc_reset()
5854 *
5855 * Reset the adapter to a known state and prepare it for further use.
5856 *
5857 * Arguments: info pointer to device instance data
5858 * Return Value: None
5859 */
5860static void usc_reset( struct mgsl_struct *info )
5861{
5862 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5863 int i;
5864 u32 readval;
5865
5866 /* Set BIT30 of Misc Control Register */
5867 /* (Local Control Register 0x50) to force reset of USC. */
5868
5869 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5870 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5871
5872 info->misc_ctrl_value |= BIT30;
5873 *MiscCtrl = info->misc_ctrl_value;
5874
5875 /*
5876 * Force at least 170ns delay before clearing
5877 * reset bit. Each read from LCR takes at least
5878 * 30ns so 10 times for 300ns to be safe.
5879 */
5880 for(i=0;i<10;i++)
5881 readval = *MiscCtrl;
5882
5883 info->misc_ctrl_value &= ~BIT30;
5884 *MiscCtrl = info->misc_ctrl_value;
5885
5886 *LCR0BRDR = BUS_DESCRIPTOR(
5887 1, // Write Strobe Hold (0-3)
5888 2, // Write Strobe Delay (0-3)
5889 2, // Read Strobe Delay (0-3)
5890 0, // NWDD (Write data-data) (0-3)
5891 4, // NWAD (Write Addr-data) (0-31)
5892 0, // NXDA (Read/Write Data-Addr) (0-3)
5893 0, // NRDD (Read Data-Data) (0-3)
5894 5 // NRAD (Read Addr-Data) (0-31)
5895 );
5896 } else {
5897 /* do HW reset */
5898 outb( 0,info->io_base + 8 );
5899 }
5900
5901 info->mbre_bit = 0;
5902 info->loopback_bits = 0;
5903 info->usc_idle_mode = 0;
5904
5905 /*
5906 * Program the Bus Configuration Register (BCR)
5907 *
5908 * <15> 0 Don't use separate address
5909 * <14..6> 0 reserved
5910 * <5..4> 00 IAckmode = Default, don't care
5911 * <3> 1 Bus Request Totem Pole output
5912 * <2> 1 Use 16 Bit data bus
5913 * <1> 0 IRQ Totem Pole output
5914 * <0> 0 Don't Shift Right Addr
5915 *
5916 * 0000 0000 0000 1100 = 0x000c
5917 *
5918 * By writing to io_base + SDPIN the Wait/Ack pin is
5919 * programmed to work as a Wait pin.
5920 */
5921
5922 outw( 0x000c,info->io_base + SDPIN );
5923
5924
5925 outw( 0,info->io_base );
5926 outw( 0,info->io_base + CCAR );
5927
5928 /* select little endian byte ordering */
5929 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5930
5931
5932 /* Port Control Register (PCR)
5933 *
5934 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5935 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5936 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5937 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5938 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5939 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5940 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5941 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5942 *
5943 * 1111 0000 1111 0101 = 0xf0f5
5944 */
5945
5946 usc_OutReg( info, PCR, 0xf0f5 );
5947
5948
5949 /*
5950 * Input/Output Control Register
5951 *
5952 * <15..14> 00 CTS is active low input
5953 * <13..12> 00 DCD is active low input
5954 * <11..10> 00 TxREQ pin is input (DSR)
5955 * <9..8> 00 RxREQ pin is input (RI)
5956 * <7..6> 00 TxD is output (Transmit Data)
5957 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5958 * <2..0> 100 RxC is Output (drive with BRG0)
5959 *
5960 * 0000 0000 0000 0100 = 0x0004
5961 */
5962
5963 usc_OutReg( info, IOCR, 0x0004 );
5964
5965} /* end of usc_reset() */
5966
5967/* usc_set_async_mode()
5968 *
5969 * Program adapter for asynchronous communications.
5970 *
5971 * Arguments: info pointer to device instance data
5972 * Return Value: None
5973 */
5974static void usc_set_async_mode( struct mgsl_struct *info )
5975{
5976 u16 RegValue;
5977
5978 /* disable interrupts while programming USC */
5979 usc_DisableMasterIrqBit( info );
5980
5981 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5982 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5983
5984 usc_loopback_frame( info );
5985
5986 /* Channel mode Register (CMR)
5987 *
5988 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5989 * <13..12> 00 00 = 16X Clock
5990 * <11..8> 0000 Transmitter mode = Asynchronous
5991 * <7..6> 00 reserved?
5992 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5993 * <3..0> 0000 Receiver mode = Asynchronous
5994 *
5995 * 0000 0000 0000 0000 = 0x0
5996 */
5997
5998 RegValue = 0;
5999 if ( info->params.stop_bits != 1 )
6000 RegValue |= BIT14;
6001 usc_OutReg( info, CMR, RegValue );
6002
6003
6004 /* Receiver mode Register (RMR)
6005 *
6006 * <15..13> 000 encoding = None
6007 * <12..08> 00000 reserved (Sync Only)
6008 * <7..6> 00 Even parity
6009 * <5> 0 parity disabled
6010 * <4..2> 000 Receive Char Length = 8 bits
6011 * <1..0> 00 Disable Receiver
6012 *
6013 * 0000 0000 0000 0000 = 0x0
6014 */
6015
6016 RegValue = 0;
6017
6018 if ( info->params.data_bits != 8 )
6019 RegValue |= BIT4+BIT3+BIT2;
6020
6021 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6022 RegValue |= BIT5;
6023 if ( info->params.parity != ASYNC_PARITY_ODD )
6024 RegValue |= BIT6;
6025 }
6026
6027 usc_OutReg( info, RMR, RegValue );
6028
6029
6030 /* Set IRQ trigger level */
6031
6032 usc_RCmd( info, RCmd_SelectRicrIntLevel );
6033
6034
6035 /* Receive Interrupt Control Register (RICR)
6036 *
6037 * <15..8> ? RxFIFO IRQ Request Level
6038 *
6039 * Note: For async mode the receive FIFO level must be set
6040 *        to 0 to avoid the situation where the FIFO contains fewer bytes
6041 * than the trigger level and no more data is expected.
6042 *
6043 * <7> 0 Exited Hunt IA (Interrupt Arm)
6044 * <6> 0 Idle Received IA
6045 * <5> 0 Break/Abort IA
6046 * <4> 0 Rx Bound IA
6047 * <3> 0 Queued status reflects oldest byte in FIFO
6048 * <2> 0 Abort/PE IA
6049 * <1> 0 Rx Overrun IA
6050 * <0> 0 Select TC0 value for readback
6051 *
6052 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6053 */
6054
6055 usc_OutReg( info, RICR, 0x0000 );
6056
6057 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6058 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6059
6060
6061 /* Transmit mode Register (TMR)
6062 *
6063 * <15..13> 000 encoding = None
6064 * <12..08> 00000 reserved (Sync Only)
6065 * <7..6> 00 Transmit parity Even
6066 * <5> 0 Transmit parity Disabled
6067 * <4..2> 000 Tx Char Length = 8 bits
6068 * <1..0> 00 Disable Transmitter
6069 *
6070 * 0000 0000 0000 0000 = 0x0
6071 */
6072
6073 RegValue = 0;
6074
6075 if ( info->params.data_bits != 8 )
6076 RegValue |= BIT4+BIT3+BIT2;
6077
6078 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6079 RegValue |= BIT5;
6080 if ( info->params.parity != ASYNC_PARITY_ODD )
6081 RegValue |= BIT6;
6082 }
6083
6084 usc_OutReg( info, TMR, RegValue );
6085
6086 usc_set_txidle( info );
6087
6088
6089 /* Set IRQ trigger level */
6090
6091 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6092
6093
6094 /* Transmit Interrupt Control Register (TICR)
6095 *
6096 * <15..8> ? Transmit FIFO IRQ Level
6097 * <7> 0 Present IA (Interrupt Arm)
6098 * <6> 1 Idle Sent IA
6099 * <5> 0 Abort Sent IA
6100 * <4> 0 EOF/EOM Sent IA
6101 * <3> 0 CRC Sent IA
6102 * <2> 0 1 = Wait for SW Trigger to Start Frame
6103 * <1> 0 Tx Underrun IA
6104 * <0> 0 TC0 constant on read back
6105 *
6106 * 0000 0000 0100 0000 = 0x0040
6107 */
6108
6109 usc_OutReg( info, TICR, 0x1f40 );
6110
6111 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6112 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6113
6114 usc_enable_async_clock( info, info->params.data_rate );
6115
6116
6117 /* Channel Control/status Register (CCSR)
6118 *
6119 * <15> X RCC FIFO Overflow status (RO)
6120 * <14> X RCC FIFO Not Empty status (RO)
6121 * <13> 0 1 = Clear RCC FIFO (WO)
6122 * <12> X DPLL in Sync status (RO)
6123 * <11> X DPLL 2 Missed Clocks status (RO)
6124 * <10> X DPLL 1 Missed Clock status (RO)
6125 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6126 * <7> X SDLC Loop On status (RO)
6127 * <6> X SDLC Loop Send status (RO)
6128 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6129 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6130 * <1..0> 00 reserved
6131 *
6132 * 0000 0000 0010 0000 = 0x0020
6133 */
6134
6135 usc_OutReg( info, CCSR, 0x0020 );
6136
6137 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6138 RECEIVE_DATA + RECEIVE_STATUS );
6139
6140 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6141 RECEIVE_DATA + RECEIVE_STATUS );
6142
6143 usc_EnableMasterIrqBit( info );
6144
6145 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6146 /* Enable INTEN (Port 6, Bit12) */
6147 /* This connects the IRQ request signal to the ISA bus */
6148 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6149 }
6150
Paul Fulghum7c1fff52005-09-09 13:02:14 -07006151 if (info->params.loopback) {
6152 info->loopback_bits = 0x300;
6153 outw(0x0300, info->io_base + CCAR);
6154 }
6155
Linus Torvalds1da177e2005-04-16 15:20:36 -07006156} /* end of usc_set_async_mode() */
6157
6158/* usc_loopback_frame()
6159 *
6160 * Loop back a small (2 byte) dummy SDLC frame.
6161 * Interrupts and DMA are NOT used. The purpose of this is to
6162 * clear any 'stale' status info left over from running in async mode.
6163 *
6164 * The 16C32 shows the strange behaviour of marking the 1st
6165 * received SDLC frame with a CRC error even when there is no
6166 *	CRC error.  To get around this a small dummy frame of 2 bytes
6167 * is looped back when switching from async to sync mode.
6168 *
6169 * Arguments: info pointer to device instance data
6170 * Return Value: None
6171 */
6172static void usc_loopback_frame( struct mgsl_struct *info )
6173{
6174 int i;
6175 unsigned long oldmode = info->params.mode;
6176
6177 info->params.mode = MGSL_MODE_HDLC;
6178
6179 usc_DisableMasterIrqBit( info );
6180
6181 usc_set_sdlc_mode( info );
6182 usc_enable_loopback( info, 1 );
6183
6184 /* Write 16-bit Time Constant for BRG0 */
6185 usc_OutReg( info, TC0R, 0 );
6186
6187 /* Channel Control Register (CCR)
6188 *
6189 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6190 * <13> 0 Trigger Tx on SW Command Disabled
6191 * <12> 0 Flag Preamble Disabled
6192 * <11..10> 00 Preamble Length = 8-Bits
6193 * <9..8> 01 Preamble Pattern = flags
6194 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6195 * <5> 0 Trigger Rx on SW Command Disabled
6196 * <4..0> 0 reserved
6197 *
6198 * 0000 0001 0000 0000 = 0x0100
6199 */
6200
6201 usc_OutReg( info, CCR, 0x0100 );
6202
6203 /* SETUP RECEIVER */
6204 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6205 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6206
6207 /* SETUP TRANSMITTER */
6208 /* Program the Transmit Character Length Register (TCLR) */
6209 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6210 usc_OutReg( info, TCLR, 2 );
6211 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6212
6213 /* unlatch Tx status bits, and start transmit channel. */
6214 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6215 outw(0,info->io_base + DATAREG);
6216
6217 /* ENABLE TRANSMITTER */
6218 usc_TCmd( info, TCmd_SendFrame );
6219 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6220
6221 /* WAIT FOR RECEIVE COMPLETE */
6222 for (i=0 ; i<1000 ; i++)
6223 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6224 break;
6225
6226 /* clear Internal Data loopback mode */
6227 usc_enable_loopback(info, 0);
6228
6229 usc_EnableMasterIrqBit(info);
6230
6231 info->params.mode = oldmode;
6232
6233} /* end of usc_loopback_frame() */
6234
6235/* usc_set_sync_mode() Programs the USC for SDLC communications.
6236 *
6237 * Arguments: info pointer to adapter info structure
6238 * Return Value: None
6239 */
6240static void usc_set_sync_mode( struct mgsl_struct *info )
6241{
6242 usc_loopback_frame( info );
6243 usc_set_sdlc_mode( info );
6244
6245 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6246 /* Enable INTEN (Port 6, Bit12) */
6247 /* This connects the IRQ request signal to the ISA bus */
6248 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6249 }
6250
6251 usc_enable_aux_clock(info, info->params.clock_speed);
6252
6253 if (info->params.loopback)
6254 usc_enable_loopback(info,1);
6255
6256}	/* end of usc_set_sync_mode() */
6257
6258/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6259 *
6260 * Arguments: info pointer to device instance data
6261 * Return Value: None
6262 */
6263static void usc_set_txidle( struct mgsl_struct *info )
6264{
6265 u16 usc_idle_mode = IDLEMODE_FLAGS;
6266
6267 /* Map API idle mode to USC register bits */
6268
6269 switch( info->idle_mode ){
6270 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6271 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6272 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6273 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6274 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6275 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6276 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6277 }
6278
6279 info->usc_idle_mode = usc_idle_mode;
6280 //usc_OutReg(info, TCSR, usc_idle_mode);
6281 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6282 info->tcsr_value += usc_idle_mode;
6283 usc_OutReg(info, TCSR, info->tcsr_value);
6284
6285 /*
6286 * if SyncLink WAN adapter is running in external sync mode, the
6287 * transmitter has been set to Monosync in order to try to mimic
6288 * a true raw outbound bit stream. Monosync still sends an open/close
6289 * sync char at the start/end of a frame. Try to match those sync
6290	 * patterns to the idle mode set here.
6291 */
6292 if ( info->params.mode == MGSL_MODE_RAW ) {
6293 unsigned char syncpat = 0;
6294 switch( info->idle_mode ) {
6295 case HDLC_TXIDLE_FLAGS:
6296 syncpat = 0x7e;
6297 break;
6298 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6299 syncpat = 0x55;
6300 break;
6301 case HDLC_TXIDLE_ZEROS:
6302 case HDLC_TXIDLE_SPACE:
6303 syncpat = 0x00;
6304 break;
6305 case HDLC_TXIDLE_ONES:
6306 case HDLC_TXIDLE_MARK:
6307 syncpat = 0xff;
6308 break;
6309 case HDLC_TXIDLE_ALT_MARK_SPACE:
6310 syncpat = 0xaa;
6311 break;
6312 }
6313
6314 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6315 }
6316
6317} /* end of usc_set_txidle() */
6318
6319/* usc_get_serial_signals()
6320 *
6321 * Query the adapter for the state of the V24 status (input) signals.
6322 *
6323 * Arguments: info pointer to device instance data
6324 * Return Value: None
6325 */
6326static void usc_get_serial_signals( struct mgsl_struct *info )
6327{
6328 u16 status;
6329
6330 /* clear all serial signals except DTR and RTS */
6331 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6332
6333 /* Read the Misc Interrupt status Register (MISR) to get */
6334 /* the V24 status signals. */
6335
6336 status = usc_InReg( info, MISR );
6337
6338 /* set serial signal bits to reflect MISR */
6339
6340 if ( status & MISCSTATUS_CTS )
6341 info->serial_signals |= SerialSignal_CTS;
6342
6343 if ( status & MISCSTATUS_DCD )
6344 info->serial_signals |= SerialSignal_DCD;
6345
6346 if ( status & MISCSTATUS_RI )
6347 info->serial_signals |= SerialSignal_RI;
6348
6349 if ( status & MISCSTATUS_DSR )
6350 info->serial_signals |= SerialSignal_DSR;
6351
6352} /* end of usc_get_serial_signals() */
6353
6354/* usc_set_serial_signals()
6355 *
6356 * Set the state of DTR and RTS based on contents of
6357 * serial_signals member of device extension.
6358 *
6359 * Arguments: info pointer to device instance data
6360 * Return Value: None
6361 */
6362static void usc_set_serial_signals( struct mgsl_struct *info )
6363{
6364 u16 Control;
6365 unsigned char V24Out = info->serial_signals;
6366
6367 /* get the current value of the Port Control Register (PCR) */
6368
6369 Control = usc_InReg( info, PCR );
6370
6371 if ( V24Out & SerialSignal_RTS )
6372 Control &= ~(BIT6);
6373 else
6374 Control |= BIT6;
6375
6376 if ( V24Out & SerialSignal_DTR )
6377 Control &= ~(BIT4);
6378 else
6379 Control |= BIT4;
6380
6381 usc_OutReg( info, PCR, Control );
6382
6383} /* end of usc_set_serial_signals() */
6384
6385/* usc_enable_async_clock()
6386 *
6387 * Enable the async clock at the specified frequency.
6388 *
6389 * Arguments: info pointer to device instance data
6390 * data_rate data rate of clock in bps
6391 * 0 disables the AUX clock.
6392 * Return Value: None
6393 */
6394static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6395{
6396 if ( data_rate ) {
6397 /*
6398 * Clock mode Control Register (CMCR)
6399 *
6400 * <15..14> 00 counter 1 Disabled
6401 * <13..12> 00 counter 0 Disabled
6402 * <11..10> 11 BRG1 Input is TxC Pin
6403 * <9..8> 11 BRG0 Input is TxC Pin
6404 * <7..6> 01 DPLL Input is BRG1 Output
6405 * <5..3> 100 TxCLK comes from BRG0
6406 * <2..0> 100 RxCLK comes from BRG0
6407 *
6408 * 0000 1111 0110 0100 = 0x0f64
6409 */
6410
6411 usc_OutReg( info, CMCR, 0x0f64 );
6412
6413
6414 /*
6415 * Write 16-bit Time Constant for BRG0
6416 * Time Constant = (ClkSpeed / data_rate) - 1
6417 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6418 */
6419
6420 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6421 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6422 else
6423 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
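		/* Example: at 9600 bps this programs TC0R = 691200/9600 - 1 = 71
		 * on PCI cards and 921600/9600 - 1 = 95 on ISA cards.  Since
		 * 691200 and 921600 are the 11.0592/14.7456 MHz crystals divided
		 * by 16, BRG0 ends up producing 16 times the data rate, matching
		 * the 16X clock sub-modes selected in CMR by usc_set_async_mode().
		 */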
6424
6425
6426 /*
6427 * Hardware Configuration Register (HCR)
6428 * Clear Bit 1, BRG0 mode = Continuous
6429 * Set Bit 0 to enable BRG0.
6430 */
6431
6432 usc_OutReg( info, HCR,
6433 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6434
6435
6436 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6437
6438 usc_OutReg( info, IOCR,
6439 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6440 } else {
6441 /* data rate == 0 so turn off BRG0 */
6442 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6443 }
6444
6445} /* end of usc_enable_async_clock() */
6446
6447/*
6448 * Buffer Structures:
6449 *
6450 * Normal memory access uses virtual addresses that can make discontiguous
6451 * physical memory pages appear to be contiguous in the virtual address
6452 * space (the processor's memory mapping handles the conversions).
6453 *
6454 * DMA transfers require physically contiguous memory. This is because
6455 * the DMA system controller and DMA bus masters deal with memory using
6456 * only physical addresses.
6457 *
6458 * This causes a problem under Windows NT when large DMA buffers are
6459 * needed. Fragmentation of the nonpaged pool prevents allocations of
6460 * physically contiguous buffers larger than the PAGE_SIZE.
6461 *
6462 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6463 * allows DMA transfers to physically discontiguous buffers. Information
6464 * about each data transfer buffer is contained in a memory structure
6465 * called a 'buffer entry'. A list of buffer entries is maintained
6466 * to track and control the use of the data transfer buffers.
6467 *
6468 * To support this strategy we will allocate sufficient PAGE_SIZE
6469 * contiguous memory buffers to allow for the total required buffer
6470 * space.
6471 *
6472 * The 16C32 accesses the list of buffer entries using Bus Master
6473 * DMA. Control information is read from the buffer entries by the
6474 * 16C32 to control data transfers. status information is written to
6475 * the buffer entries by the 16C32 to indicate the status of completed
6476 * transfers.
6477 *
6478 * The CPU writes control information to the buffer entries to control
6479 * the 16C32 and reads status information from the buffer entries to
6480 * determine information about received and transmitted frames.
6481 *
6482 * Because the CPU and 16C32 (adapter) both need simultaneous access
6483 * to the buffer entries, the buffer entry memory is allocated with
6484 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6485 * entry list to PAGE_SIZE.
6486 *
6487 * The actual data buffers on the other hand will only be accessed
6488 * by the CPU or the adapter but not by both simultaneously. This allows
6489 * Scatter/Gather packet based DMA procedures for using physically
6490 * discontiguous pages.
6491 */
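/*
 * For orientation: judging from how the entries are used in the routines
 * below, each DMABUFFERENTRY in the shared list carries roughly the
 * following fields (the authoritative definition appears earlier in the
 * driver, outside this excerpt):
 *
 *	count		byte count; cleared by the 16C32 when it takes a buffer
 *	status		completion status; written by the 16C32 at end of frame
 *	rcc		residual character count used to derive the frame size
 *	virt_addr	CPU (virtual) address of the data buffer
 *	phys_entry	physical address of this list entry, loaded into
 *			NRARL/NRARU or NTARL/NTARU to start a DMA chain
 */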
6492
6493/*
6494 * mgsl_reset_tx_dma_buffers()
6495 *
6496 * Set the count for all transmit buffers to 0 to indicate the
6497 * buffer is available for use and set the current buffer to the
6498 * first buffer. This effectively makes all buffers free and
6499 * discards any data in buffers.
6500 *
6501 * Arguments: info pointer to device instance data
6502 * Return Value: None
6503 */
6504static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6505{
6506 unsigned int i;
6507
6508 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6509 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6510 }
6511
6512 info->current_tx_buffer = 0;
6513 info->start_tx_dma_buffer = 0;
6514 info->tx_dma_buffers_used = 0;
6515
6516 info->get_tx_holding_index = 0;
6517 info->put_tx_holding_index = 0;
6518 info->tx_holding_count = 0;
6519
6520} /* end of mgsl_reset_tx_dma_buffers() */
6521
6522/*
6523 * num_free_tx_dma_buffers()
6524 *
6525 * returns the number of free tx dma buffers available
6526 *
6527 * Arguments: info pointer to device instance data
6528 * Return Value: number of free tx dma buffers
6529 */
6530static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6531{
6532 return info->tx_buffer_count - info->tx_dma_buffers_used;
6533}
6534
6535/*
6536 * mgsl_reset_rx_dma_buffers()
6537 *
6538 * Set the count for all receive buffers to DMABUFFERSIZE
6539 * and set the current buffer to the first buffer. This effectively
6540 * makes all buffers free and discards any data in buffers.
6541 *
6542 * Arguments: info pointer to device instance data
6543 * Return Value: None
6544 */
6545static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6546{
6547 unsigned int i;
6548
6549 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6550 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6551// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6552// info->rx_buffer_list[i].status = 0;
6553 }
6554
6555 info->current_rx_buffer = 0;
6556
6557} /* end of mgsl_reset_rx_dma_buffers() */
6558
6559/*
6560 * mgsl_free_rx_frame_buffers()
6561 *
6562 * Free the receive buffers used by a received SDLC
6563 * frame such that the buffers can be reused.
6564 *
6565 * Arguments:
6566 *
6567 * info pointer to device instance data
6568 * StartIndex index of 1st receive buffer of frame
6569 * EndIndex index of last receive buffer of frame
6570 *
6571 * Return Value: None
6572 */
6573static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6574{
6575 int Done = 0;
6576 DMABUFFERENTRY *pBufEntry;
6577 unsigned int Index;
6578
6579 /* Starting with 1st buffer entry of the frame clear the status */
6580 /* field and set the count field to DMA Buffer Size. */
6581
6582 Index = StartIndex;
6583
6584 while( !Done ) {
6585 pBufEntry = &(info->rx_buffer_list[Index]);
6586
6587 if ( Index == EndIndex ) {
6588 /* This is the last buffer of the frame! */
6589 Done = 1;
6590 }
6591
6592 /* reset current buffer for reuse */
6593// pBufEntry->status = 0;
6594// pBufEntry->count = DMABUFFERSIZE;
6595 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6596
6597 /* advance to next buffer entry in linked list */
6598 Index++;
6599 if ( Index == info->rx_buffer_count )
6600 Index = 0;
6601 }
6602
6603 /* set current buffer to next buffer after last buffer of frame */
6604 info->current_rx_buffer = Index;
6605
6606}	/* end of mgsl_free_rx_frame_buffers() */
6607
6608/* mgsl_get_rx_frame()
6609 *
6610 * This function attempts to return a received SDLC frame from the
6611 * receive DMA buffers. Only frames received without errors are returned.
6612 *
6613 * Arguments: info pointer to device extension
6614 * Return Value: 1 if frame returned, otherwise 0
6615 */
6616static int mgsl_get_rx_frame(struct mgsl_struct *info)
6617{
6618 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6619 unsigned short status;
6620 DMABUFFERENTRY *pBufEntry;
6621 unsigned int framesize = 0;
6622 int ReturnCode = 0;
6623 unsigned long flags;
6624 struct tty_struct *tty = info->tty;
6625 int return_frame = 0;
6626
6627 /*
6628 * current_rx_buffer points to the 1st buffer of the next available
6629 * receive frame. To find the last buffer of the frame look for
6630 * a non-zero status field in the buffer entries. (The status
6631	 * field is set by the 16C32 after completing a receive frame.)
6632 */
6633
6634 StartIndex = EndIndex = info->current_rx_buffer;
6635
6636 while( !info->rx_buffer_list[EndIndex].status ) {
6637 /*
6638 * If the count field of the buffer entry is non-zero then
6639 * this buffer has not been used. (The 16C32 clears the count
6640 * field when it starts using the buffer.) If an unused buffer
6641 * is encountered then there are no frames available.
6642 */
6643
6644 if ( info->rx_buffer_list[EndIndex].count )
6645 goto Cleanup;
6646
6647 /* advance to next buffer entry in linked list */
6648 EndIndex++;
6649 if ( EndIndex == info->rx_buffer_count )
6650 EndIndex = 0;
6651
6652 /* if entire list searched then no frame available */
6653 if ( EndIndex == StartIndex ) {
6654 /* If this occurs then something bad happened,
6655 * all buffers have been 'used' but none mark
6656 * the end of a frame. Reset buffers and receiver.
6657 */
6658
6659 if ( info->rx_enabled ){
6660 spin_lock_irqsave(&info->irq_spinlock,flags);
6661 usc_start_receiver(info);
6662 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6663 }
6664 goto Cleanup;
6665 }
6666 }
6667
6668
6669 /* check status of receive frame */
6670
6671 status = info->rx_buffer_list[EndIndex].status;
6672
6673 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6674 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6675 if ( status & RXSTATUS_SHORT_FRAME )
6676 info->icount.rxshort++;
6677 else if ( status & RXSTATUS_ABORT )
6678 info->icount.rxabort++;
6679 else if ( status & RXSTATUS_OVERRUN )
6680 info->icount.rxover++;
6681 else {
6682 info->icount.rxcrc++;
6683 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6684 return_frame = 1;
6685 }
6686 framesize = 0;
6687#ifdef CONFIG_HDLC
6688 {
6689 struct net_device_stats *stats = hdlc_stats(info->netdev);
6690 stats->rx_errors++;
6691 stats->rx_frame_errors++;
6692 }
6693#endif
6694 } else
6695 return_frame = 1;
6696
6697 if ( return_frame ) {
6698 /* receive frame has no errors, get frame size.
6699 * The frame size is the starting value of the RCC (which was
6700 * set to 0xffff) minus the ending value of the RCC (decremented
6701 * once for each receive character) minus 2 for the 16-bit CRC.
6702 */
6703
6704 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6705
6706 /* adjust frame size for CRC if any */
6707 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6708 framesize -= 2;
6709 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6710 framesize -= 4;
6711 }
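	/* Illustrative numbers: a 100 byte frame with a 16-bit CRC leaves the
	 * ending RCC at 0xffff - 102, so RCLRVALUE - rcc = 102 and the CRC
	 * adjustment above yields framesize = 100 bytes of payload.
	 */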
6712
6713 if ( debug_level >= DEBUG_LEVEL_BH )
6714 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6715 __FILE__,__LINE__,info->device_name,status,framesize);
6716
6717 if ( debug_level >= DEBUG_LEVEL_DATA )
6718 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6719 min_t(int, framesize, DMABUFFERSIZE),0);
6720
6721 if (framesize) {
6722 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6723 ((framesize+1) > info->max_frame_size) ) ||
6724 (framesize > info->max_frame_size) )
6725 info->icount.rxlong++;
6726 else {
6727 /* copy dma buffer(s) to contiguous intermediate buffer */
6728 int copy_count = framesize;
6729 int index = StartIndex;
6730 unsigned char *ptmp = info->intermediate_rxbuffer;
6731
6732 if ( !(status & RXSTATUS_CRC_ERROR))
6733 info->icount.rxok++;
6734
6735 while(copy_count) {
6736 int partial_count;
6737 if ( copy_count > DMABUFFERSIZE )
6738 partial_count = DMABUFFERSIZE;
6739 else
6740 partial_count = copy_count;
6741
6742 pBufEntry = &(info->rx_buffer_list[index]);
6743 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6744 ptmp += partial_count;
6745 copy_count -= partial_count;
6746
6747 if ( ++index == info->rx_buffer_count )
6748 index = 0;
6749 }
6750
6751 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6752 ++framesize;
6753 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6754 RX_CRC_ERROR :
6755 RX_OK);
6756
6757 if ( debug_level >= DEBUG_LEVEL_DATA )
6758 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6759 __FILE__,__LINE__,info->device_name,
6760 *ptmp);
6761 }
6762
6763#ifdef CONFIG_HDLC
6764 if (info->netcount)
6765 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6766 else
6767#endif
6768 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6769 }
6770 }
6771 /* Free the buffers used by this frame. */
6772 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6773
6774 ReturnCode = 1;
6775
6776Cleanup:
6777
6778 if ( info->rx_enabled && info->rx_overflow ) {
6779		/* The receiver needs to be restarted because of
6780 * a receive overflow (buffer or FIFO). If the
6781 * receive buffers are now empty, then restart receiver.
6782 */
6783
6784 if ( !info->rx_buffer_list[EndIndex].status &&
6785 info->rx_buffer_list[EndIndex].count ) {
6786 spin_lock_irqsave(&info->irq_spinlock,flags);
6787 usc_start_receiver(info);
6788 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6789 }
6790 }
6791
6792 return ReturnCode;
6793
6794} /* end of mgsl_get_rx_frame() */
6795
6796/* mgsl_get_raw_rx_frame()
6797 *
6798 * This function attempts to return a received frame from the
6799 * receive DMA buffers when running in external loop mode. In this mode,
6800 * we will return at most one DMABUFFERSIZE frame to the application.
6801 * The USC receiver is triggering off of DCD going active to start a new
6802 * frame, and DCD going inactive to terminate the frame (similar to
6803 * processing a closing flag character).
6804 *
6805 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6806 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6807 * status field and the RCC field will indicate the length of the
6808 * entire received frame. We take this RCC field and get the modulus
6809 *	of RCC and DMABUFFERSIZE to determine the number of bytes in the
6810 * last Rx DMA buffer and return that last portion of the frame.
6811 *
6812 * Arguments: info pointer to device extension
6813 * Return Value: 1 if frame returned, otherwise 0
6814 */
6815static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6816{
6817 unsigned int CurrentIndex, NextIndex;
6818 unsigned short status;
6819 DMABUFFERENTRY *pBufEntry;
6820 unsigned int framesize = 0;
6821 int ReturnCode = 0;
6822 unsigned long flags;
6823 struct tty_struct *tty = info->tty;
6824
6825 /*
6826 * current_rx_buffer points to the 1st buffer of the next available
6827 * receive frame. The status field is set by the 16C32 after
6828 * completing a receive frame. If the status field of this buffer
6829 * is zero, either the USC is still filling this buffer or this
6830 * is one of a series of buffers making up a received frame.
6831 *
6832 * If the count field of this buffer is zero, the USC is either
6833 * using this buffer or has used this buffer. Look at the count
6834 * field of the next buffer. If that next buffer's count is
6835 * non-zero, the USC is still actively using the current buffer.
6836 * Otherwise, if the next buffer's count field is zero, the
6837 * current buffer is complete and the USC is using the next
6838 * buffer.
6839 */
6840 CurrentIndex = NextIndex = info->current_rx_buffer;
6841 ++NextIndex;
6842 if ( NextIndex == info->rx_buffer_count )
6843 NextIndex = 0;
6844
6845 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6846 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6847 info->rx_buffer_list[NextIndex].count == 0)) {
6848 /*
6849 * Either the status field of this dma buffer is non-zero
6850 * (indicating the last buffer of a receive frame) or the next
6851 * buffer is marked as in use -- implying this buffer is complete
6852	 * and is an intermediate buffer for this received frame.
6853 */
6854
6855 status = info->rx_buffer_list[CurrentIndex].status;
6856
6857 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6858 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6859 if ( status & RXSTATUS_SHORT_FRAME )
6860 info->icount.rxshort++;
6861 else if ( status & RXSTATUS_ABORT )
6862 info->icount.rxabort++;
6863 else if ( status & RXSTATUS_OVERRUN )
6864 info->icount.rxover++;
6865 else
6866 info->icount.rxcrc++;
6867 framesize = 0;
6868 } else {
6869 /*
6870 * A receive frame is available, get frame size and status.
6871 *
6872 * The frame size is the starting value of the RCC (which was
6873 * set to 0xffff) minus the ending value of the RCC (decremented
6874 * once for each receive character) minus 2 or 4 for the 16-bit
6875 * or 32-bit CRC.
6876 *
6877 * If the status field is zero, this is an intermediate buffer.
6878		 * Its size is 4K.
6879 *
6880 * If the DMA Buffer Entry's Status field is non-zero, the
6881 * receive operation completed normally (ie: DCD dropped). The
6882 * RCC field is valid and holds the received frame size.
6883 * It is possible that the RCC field will be zero on a DMA buffer
6884 * entry with a non-zero status. This can occur if the total
6885 * frame size (number of bytes between the time DCD goes active
6886 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6887 * case the 16C32 has underrun on the RCC count and appears to
6888 * stop updating this counter to let us know the actual received
6889 * frame size. If this happens (non-zero status and zero RCC),
6890 * simply return the entire RxDMA Buffer
6891 */
6892 if ( status ) {
6893 /*
6894 * In the event that the final RxDMA Buffer is
6895 * terminated with a non-zero status and the RCC
6896 * field is zero, we interpret this as the RCC
6897 * having underflowed (received frame > 65535 bytes).
6898 *
6899 * Signal the event to the user by passing back
6900 * a status of RxStatus_CrcError returning the full
6901 * buffer and let the app figure out what data is
6902 * actually valid
6903 */
6904 if ( info->rx_buffer_list[CurrentIndex].rcc )
6905 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6906 else
6907 framesize = DMABUFFERSIZE;
6908 }
6909 else
6910 framesize = DMABUFFERSIZE;
6911 }
6912
6913 if ( framesize > DMABUFFERSIZE ) {
6914 /*
6915 * if running in raw sync mode, ISR handler for
6916 * End Of Buffer events terminates all buffers at 4K.
6917 * If this frame size is said to be >4K, get the
6918 * actual number of bytes of the frame in this buffer.
6919 */
6920 framesize = framesize % DMABUFFERSIZE;
6921 }
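		/* Example (assuming the 4K buffers described above): a raw
		 * receive of 10000 bytes gives framesize = 10000 from the RCC,
		 * and 10000 % 4096 = 1808, the number of bytes actually held
		 * in this final Rx DMA buffer.
		 */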
6922
6923
6924 if ( debug_level >= DEBUG_LEVEL_BH )
6925 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6926 __FILE__,__LINE__,info->device_name,status,framesize);
6927
6928 if ( debug_level >= DEBUG_LEVEL_DATA )
6929 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6930 min_t(int, framesize, DMABUFFERSIZE),0);
6931
6932 if (framesize) {
6933 /* copy dma buffer(s) to contiguous intermediate buffer */
6934 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6935
6936 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6937 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6938 info->icount.rxok++;
6939
6940 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6941 }
6942
6943 /* Free the buffers used by this frame. */
6944 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6945
6946 ReturnCode = 1;
6947 }
6948
6949
6950 if ( info->rx_enabled && info->rx_overflow ) {
6951		/* The receiver needs to be restarted because of
6952 * a receive overflow (buffer or FIFO). If the
6953 * receive buffers are now empty, then restart receiver.
6954 */
6955
6956 if ( !info->rx_buffer_list[CurrentIndex].status &&
6957 info->rx_buffer_list[CurrentIndex].count ) {
6958 spin_lock_irqsave(&info->irq_spinlock,flags);
6959 usc_start_receiver(info);
6960 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6961 }
6962 }
6963
6964 return ReturnCode;
6965
6966} /* end of mgsl_get_raw_rx_frame() */
6967
6968/* mgsl_load_tx_dma_buffer()
6969 *
6970 * Load the transmit DMA buffer with the specified data.
6971 *
6972 * Arguments:
6973 *
6974 * info pointer to device extension
6975 * Buffer pointer to buffer containing frame to load
6976 * BufferSize size in bytes of frame in Buffer
6977 *
6978 * Return Value: None
6979 */
6980static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6981 const char *Buffer, unsigned int BufferSize)
6982{
6983 unsigned short Copycount;
6984 unsigned int i = 0;
6985 DMABUFFERENTRY *pBufEntry;
6986
6987 if ( debug_level >= DEBUG_LEVEL_DATA )
6988 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6989
6990 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6991 /* set CMR:13 to start transmit when
6992 * next GoAhead (abort) is received
6993 */
6994 info->cmr_value |= BIT13;
6995 }
6996
6997 /* begin loading the frame in the next available tx dma
6998 * buffer, remember it's starting location for setting
6999 * up tx dma operation
7000 */
7001 i = info->current_tx_buffer;
7002 info->start_tx_dma_buffer = i;
7003
7004 /* Setup the status and RCC (Frame Size) fields of the 1st */
7005 /* buffer entry in the transmit DMA buffer list. */
7006
7007 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
7008 info->tx_buffer_list[i].rcc = BufferSize;
7009 info->tx_buffer_list[i].count = BufferSize;
7010
7011 /* Copy frame data from 1st source buffer to the DMA buffers. */
7012 /* The frame data may span multiple DMA buffers. */
7013
7014 while( BufferSize ){
7015 /* Get a pointer to next DMA buffer entry. */
7016 pBufEntry = &info->tx_buffer_list[i++];
7017
7018 if ( i == info->tx_buffer_count )
7019 i=0;
7020
7021 /* Calculate the number of bytes that can be copied from */
7022 /* the source buffer to this DMA buffer. */
7023 if ( BufferSize > DMABUFFERSIZE )
7024 Copycount = DMABUFFERSIZE;
7025 else
7026 Copycount = BufferSize;
7027
7028 /* Actually copy data from source buffer to DMA buffer. */
7029 /* Also set the data count for this individual DMA buffer. */
7030 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
7031 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
7032 else
7033 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
7034
7035 pBufEntry->count = Copycount;
7036
7037 /* Advance source pointer and reduce remaining data count. */
7038 Buffer += Copycount;
7039 BufferSize -= Copycount;
7040
7041 ++info->tx_dma_buffers_used;
7042 }
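	/* Illustrative example (again assuming 4K DMA buffers): a 10000 byte
	 * frame is spread over three entries with counts 4096, 4096 and 1808;
	 * only the first entry's rcc field holds the full frame size, which
	 * usc_start_transmitter() later loads into TCLR.
	 */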
7043
7044 /* remember next available tx dma buffer */
7045 info->current_tx_buffer = i;
7046
7047} /* end of mgsl_load_tx_dma_buffer() */
7048
7049/*
7050 * mgsl_register_test()
7051 *
7052 * Performs a register test of the 16C32.
7053 *
7054 * Arguments: info pointer to device instance data
7055 * Return Value: TRUE if test passed, otherwise FALSE
7056 */
7057static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7058{
7059 static unsigned short BitPatterns[] =
7060 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
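	/* all-zero, all-one, alternating, and arbitrary word values; the test
	 * loop below writes a different pattern to each register so no two
	 * registers hold the same value when read back.
	 */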
7061 	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
7062 	unsigned int i;
7063 BOOLEAN rc = TRUE;
7064 unsigned long flags;
7065
7066 spin_lock_irqsave(&info->irq_spinlock,flags);
7067 usc_reset(info);
7068
7069 /* Verify the reset state of some registers. */
7070
7071 if ( (usc_InReg( info, SICR ) != 0) ||
7072 (usc_InReg( info, IVR ) != 0) ||
7073 (usc_InDmaReg( info, DIVR ) != 0) ){
7074 rc = FALSE;
7075 }
7076
7077 if ( rc == TRUE ){
7078 /* Write bit patterns to various registers but do it out of */
7079 /* sync, then read back and verify values. */
7080
7081 for ( i = 0 ; i < Patterncount ; i++ ) {
7082 usc_OutReg( info, TC0R, BitPatterns[i] );
7083 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7084 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7085 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7086 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7087 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7088
7089 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7090 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7091 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7092 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7093 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7094 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7095 rc = FALSE;
7096 break;
7097 }
7098 }
7099 }
7100
7101 usc_reset(info);
7102 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7103
7104 return rc;
7105
7106} /* end of mgsl_register_test() */
7107
7108/* mgsl_irq_test() Perform interrupt test of the 16C32.
7109 *
7110 * Arguments: info pointer to device instance data
7111 * Return Value: TRUE if test passed, otherwise FALSE
7112 */
7113static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7114{
7115 unsigned long EndTime;
7116 unsigned long flags;
7117
7118 spin_lock_irqsave(&info->irq_spinlock,flags);
7119 usc_reset(info);
7120
7121 /*
7122 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7123 * The ISR sets irq_occurred to 1.
7124 */
7125
7126 info->irq_occurred = FALSE;
7127
7128 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7129 /* Enable INTEN (Port 6, Bit12) */
7130 /* This connects the IRQ request signal to the ISA bus */
7131 /* on the ISA adapter. This has no effect for the PCI adapter */
7132 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7133
7134 usc_EnableMasterIrqBit(info);
7135 usc_EnableInterrupts(info, IO_PIN);
7136 usc_ClearIrqPendingBits(info, IO_PIN);
7137
7138 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7139 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7140
7141 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7142
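	/* poll the interrupt flag for up to ~1 second (100 x 10ms) */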
7143 EndTime=100;
7144 while( EndTime-- && !info->irq_occurred ) {
7145 msleep_interruptible(10);
7146 }
7147
7148 spin_lock_irqsave(&info->irq_spinlock,flags);
7149 usc_reset(info);
7150 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7151
7152 if ( !info->irq_occurred )
7153 return FALSE;
7154 else
7155 return TRUE;
7156
7157} /* end of mgsl_irq_test() */
7158
7159/* mgsl_dma_test()
7160 *
7161 * Perform a DMA test of the 16C32. A small frame is
7162 * transmitted via DMA from a transmit buffer to a receive buffer
7163 * using single buffer DMA mode.
7164 *
7165 * Arguments: info pointer to device instance data
7166 * Return Value: TRUE if test passed, otherwise FALSE
7167 */
7168static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7169{
7170 unsigned short FifoLevel;
7171 unsigned long phys_addr;
7172 unsigned int FrameSize;
7173 unsigned int i;
7174 char *TmpPtr;
7175 BOOLEAN rc = TRUE;
7176 unsigned short status=0;
7177 unsigned long EndTime;
7178 unsigned long flags;
7179 MGSL_PARAMS tmp_params;
7180
7181 /* save current port options */
7182 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7183 /* load default port options */
7184 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7185
7186#define TESTFRAMESIZE 40
7187
7188 spin_lock_irqsave(&info->irq_spinlock,flags);
7189
7190 /* setup 16C32 for SDLC DMA transfer mode */
7191
7192 usc_reset(info);
7193 usc_set_sdlc_mode(info);
7194 usc_enable_loopback(info,1);
7195
7196 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7197 * field of the buffer entry after fetching buffer address. This
7198 * way we can detect a DMA failure for a DMA read (which should be
7199 	 * non-destructive to system memory) before we try to write to
7200 * memory (where a failure could corrupt system memory).
7201 */
7202
7203 /* Receive DMA mode Register (RDMR)
7204 *
7205 * <15..14> 11 DMA mode = Linked List Buffer mode
7206 * <13> 1 RSBinA/L = store Rx status Block in List entry
7207 * <12> 0 1 = Clear count of List Entry after fetching
7208 * <11..10> 00 Address mode = Increment
7209 * <9> 1 Terminate Buffer on RxBound
7210 * <8> 0 Bus Width = 16bits
7211 * <7..0> ? status Bits (write as 0s)
7212 *
7213 * 1110 0010 0000 0000 = 0xe200
7214 */
7215
7216 usc_OutDmaReg( info, RDMR, 0xe200 );
7217
7218 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7219
7220
7221 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7222
7223 FrameSize = TESTFRAMESIZE;
7224
7225 /* setup 1st transmit buffer entry: */
7226 /* with frame size and transmit control word */
7227
7228 info->tx_buffer_list[0].count = FrameSize;
7229 info->tx_buffer_list[0].rcc = FrameSize;
7230 info->tx_buffer_list[0].status = 0x4000;
7231
7232 /* build a transmit frame in 1st transmit DMA buffer */
7233
7234 TmpPtr = info->tx_buffer_list[0].virt_addr;
7235 for (i = 0; i < FrameSize; i++ )
7236 *TmpPtr++ = i;
7237
7238 /* setup 1st receive buffer entry: */
7239 /* clear status, set max receive buffer size */
7240
7241 info->rx_buffer_list[0].status = 0;
7242 info->rx_buffer_list[0].count = FrameSize + 4;
7243
7244 /* zero out the 1st receive buffer */
7245
7246 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7247
7248 /* Set count field of next buffer entries to prevent */
7249 /* 16C32 from using buffers after the 1st one. */
7250
7251 info->tx_buffer_list[1].count = 0;
7252 info->rx_buffer_list[1].count = 0;
7253
7254
7255 /***************************/
7256 /* Program 16C32 receiver. */
7257 /***************************/
7258
7259 spin_lock_irqsave(&info->irq_spinlock,flags);
7260
7261 /* setup DMA transfers */
7262 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7263
7264 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7265 phys_addr = info->rx_buffer_list[0].phys_entry;
7266 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7267 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7268
7269 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7270 usc_InDmaReg( info, RDMR );
7271 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7272
7273 /* Enable Receiver (RMR <1..0> = 10) */
7274 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7275
7276 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7277
7278
7279 /*************************************************************/
7280 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7281 /*************************************************************/
7282
7283 /* Wait 100ms for interrupt. */
7284 EndTime = jiffies + msecs_to_jiffies(100);
7285
7286 for(;;) {
7287 if (time_after(jiffies, EndTime)) {
7288 rc = FALSE;
7289 break;
7290 }
7291
7292 spin_lock_irqsave(&info->irq_spinlock,flags);
7293 status = usc_InDmaReg( info, RDMR );
7294 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7295
7296 if ( !(status & BIT4) && (status & BIT5) ) {
7297 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7298 /* BUSY (BIT 5) is active (channel still active). */
7299 /* This means the buffer entry read has completed. */
7300 break;
7301 }
7302 }
7303
7304
7305 /******************************/
7306 /* Program 16C32 transmitter. */
7307 /******************************/
7308
7309 spin_lock_irqsave(&info->irq_spinlock,flags);
7310
7311 /* Program the Transmit Character Length Register (TCLR) */
7312 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7313
7314 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7315 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7316
7317 /* Program the address of the 1st DMA Buffer Entry in linked list */
7318
7319 phys_addr = info->tx_buffer_list[0].phys_entry;
7320 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7321 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7322
7323 /* unlatch Tx status bits, and start transmit channel. */
7324
7325 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7326 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7327
7328 /* wait for DMA controller to fill transmit FIFO */
7329
7330 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7331
7332 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7333
7334
7335 /**********************************/
7336 /* WAIT FOR TRANSMIT FIFO TO FILL */
7337 /**********************************/
7338
7339 /* Wait 100ms */
7340 EndTime = jiffies + msecs_to_jiffies(100);
7341
7342 for(;;) {
7343 if (time_after(jiffies, EndTime)) {
7344 rc = FALSE;
7345 break;
7346 }
7347
7348 spin_lock_irqsave(&info->irq_spinlock,flags);
7349 FifoLevel = usc_InReg(info, TICR) >> 8;
7350 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7351
7352 if ( FifoLevel < 16 )
7353 break;
7354 else
7355 if ( FrameSize < 32 ) {
7356 /* This frame is smaller than the entire transmit FIFO */
7357 /* so wait for the entire frame to be loaded. */
7358 if ( FifoLevel <= (32 - FrameSize) )
7359 break;
7360 }
7361 }
7362
7363
7364 if ( rc == TRUE )
7365 {
7366 /* Enable 16C32 transmitter. */
7367
7368 spin_lock_irqsave(&info->irq_spinlock,flags);
7369
7370 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7371 usc_TCmd( info, TCmd_SendFrame );
7372 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7373
7374 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7375
7376
7377 /******************************/
7378 /* WAIT FOR TRANSMIT COMPLETE */
7379 /******************************/
7380
7381 /* Wait 100ms */
7382 EndTime = jiffies + msecs_to_jiffies(100);
7383
7384 /* While timer not expired wait for transmit complete */
7385
7386 spin_lock_irqsave(&info->irq_spinlock,flags);
7387 status = usc_InReg( info, TCSR );
7388 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7389
7390 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7391 if (time_after(jiffies, EndTime)) {
7392 rc = FALSE;
7393 break;
7394 }
7395
7396 spin_lock_irqsave(&info->irq_spinlock,flags);
7397 status = usc_InReg( info, TCSR );
7398 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7399 }
7400 }
7401
7402
7403 if ( rc == TRUE ){
7404 /* CHECK FOR TRANSMIT ERRORS */
7405 if ( status & (BIT5 + BIT1) )
7406 rc = FALSE;
7407 }
7408
7409 if ( rc == TRUE ) {
7410 /* WAIT FOR RECEIVE COMPLETE */
7411
7412 /* Wait 100ms */
7413 EndTime = jiffies + msecs_to_jiffies(100);
7414
7415 /* Wait for 16C32 to write receive status to buffer entry. */
7416 status=info->rx_buffer_list[0].status;
7417 while ( status == 0 ) {
7418 if (time_after(jiffies, EndTime)) {
7419 rc = FALSE;
7420 break;
7421 }
7422 status=info->rx_buffer_list[0].status;
7423 }
7424 }
7425
7426
7427 if ( rc == TRUE ) {
7428 /* CHECK FOR RECEIVE ERRORS */
7429 status = info->rx_buffer_list[0].status;
7430
7431 if ( status & (BIT8 + BIT3 + BIT1) ) {
7432 /* receive error has occurred */
7433 rc = FALSE;
7434 } else {
7435 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7436 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7437 rc = FALSE;
7438 }
7439 }
7440 }
7441
7442 spin_lock_irqsave(&info->irq_spinlock,flags);
7443 usc_reset( info );
7444 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7445
7446 /* restore current port options */
7447 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7448
7449 return rc;
7450
7451} /* end of mgsl_dma_test() */
7452
7453/* mgsl_adapter_test()
7454 *
7455 * Perform the register, IRQ, and DMA tests for the 16C32.
7456 *
7457 * Arguments: info pointer to device instance data
7458 * Return Value: 0 if success, otherwise -ENODEV
7459 */
7460static int mgsl_adapter_test( struct mgsl_struct *info )
7461{
7462 if ( debug_level >= DEBUG_LEVEL_INFO )
7463 printk( "%s(%d):Testing device %s\n",
7464 __FILE__,__LINE__,info->device_name );
7465
7466 if ( !mgsl_register_test( info ) ) {
7467 info->init_error = DiagStatus_AddressFailure;
7468 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7469 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7470 return -ENODEV;
7471 }
7472
7473 if ( !mgsl_irq_test( info ) ) {
7474 info->init_error = DiagStatus_IrqFailure;
7475 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7476 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7477 return -ENODEV;
7478 }
7479
7480 if ( !mgsl_dma_test( info ) ) {
7481 info->init_error = DiagStatus_DmaFailure;
7482 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7483 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7484 return -ENODEV;
7485 }
7486
7487 if ( debug_level >= DEBUG_LEVEL_INFO )
7488 printk( "%s(%d):device %s passed diagnostics\n",
7489 __FILE__,__LINE__,info->device_name );
7490
7491 return 0;
7492
7493} /* end of mgsl_adapter_test() */
7494
7495/* mgsl_memory_test()
7496 *
7497 * Test the shared memory on a PCI adapter.
7498 *
7499 * Arguments: info pointer to device instance data
7500 * Return Value: TRUE if test passed, otherwise FALSE
7501 */
7502static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7503{
7504 	static unsigned long BitPatterns[] =
7505 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7506 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7507 	unsigned long i;
7508 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7509 unsigned long * TestAddr;
7510
7511 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7512 return TRUE;
7513
7514 TestAddr = (unsigned long *)info->memory_base;
7515
7516 /* Test data lines with test pattern at one location. */
7517
7518 for ( i = 0 ; i < Patterncount ; i++ ) {
7519 *TestAddr = BitPatterns[i];
7520 if ( *TestAddr != BitPatterns[i] )
7521 return FALSE;
7522 }
7523
7524 /* Test address lines with incrementing pattern over */
7525 /* entire address range. */
7526
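	/* each 32-bit word is written with its own byte offset (i * 4), so a
	 * stuck or aliased address line shows up as a mismatched value in the
	 * readback pass below.
	 */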
7527 for ( i = 0 ; i < TestLimit ; i++ ) {
7528 *TestAddr = i * 4;
7529 TestAddr++;
7530 }
7531
7532 TestAddr = (unsigned long *)info->memory_base;
7533
7534 for ( i = 0 ; i < TestLimit ; i++ ) {
7535 if ( *TestAddr != i * 4 )
7536 return FALSE;
7537 TestAddr++;
7538 }
7539
7540 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7541
7542 return TRUE;
7543
7544} /* End Of mgsl_memory_test() */
7545
7546
7547/* mgsl_load_pci_memory()
7548 *
7549 * Load a large block of data into the PCI shared memory.
7550 * Use this instead of memcpy() or memmove() to move data
7551 * into the PCI shared memory.
7552 *
7553 * Notes:
7554 *
7555 * This function prevents the PCI9050 interface chip from hogging
7556 * the adapter local bus, which can starve the 16C32 by preventing
7557 * 16C32 bus master cycles.
7558 *
7559 * The PCI9050 documentation says that the 9050 will always release
7560 * control of the local bus after completing the current read
7561 * or write operation.
7562 *
7563 * It appears that as long as the PCI9050 write FIFO is full, the
7564 * PCI9050 treats all of the writes as a single burst transaction
7565 * and will not release the bus. This causes DMA latency problems
7566 * at high speeds when copying large data blocks to the shared
7567 * memory.
7568 *
7569 	 * This function, in effect, breaks a large shared memory write
7570 	 * into multiple transactions by interleaving a shared memory read
7571 	 * which will flush the write FIFO and 'complete' the write
7572 	 * transaction. This allows any pending DMA request to gain control
7573 	 * of the local bus in a timely fashion.
7574 *
7575 * Arguments:
7576 *
7577 * TargetPtr pointer to target address in PCI shared memory
7578 * SourcePtr pointer to source buffer for data
7579 * count count in bytes of data to copy
7580 *
7581 * Return Value: None
7582 */
7583static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7584 unsigned short count )
7585{
7586 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7587#define PCI_LOAD_INTERVAL 64
7588
7589 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7590 unsigned short Index;
7591 unsigned long Dummy;
7592
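	/* copy whole 64-byte intervals, reading the target back once per
	 * interval to flush the PCI9050 write FIFO; any remainder is copied
	 * after the loop.
	 */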
7593 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7594 {
7595 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7596 Dummy = *((volatile unsigned long *)TargetPtr);
7597 TargetPtr += PCI_LOAD_INTERVAL;
7598 SourcePtr += PCI_LOAD_INTERVAL;
7599 }
7600
7601 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7602
7603} /* End Of mgsl_load_pci_memory() */
7604
7605static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7606{
7607 int i;
7608 int linecount;
7609 if (xmit)
7610 printk("%s tx data:\n",info->device_name);
7611 else
7612 printk("%s rx data:\n",info->device_name);
7613
7614 while(count) {
7615 if (count > 16)
7616 linecount = 16;
7617 else
7618 linecount = count;
7619
7620 for(i=0;i<linecount;i++)
7621 printk("%02X ",(unsigned char)data[i]);
7622 for(;i<17;i++)
7623 printk(" ");
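		/* 040-0176 octal is the printable ASCII range (' ' through '~') */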
7624 for(i=0;i<linecount;i++) {
7625 if (data[i]>=040 && data[i]<=0176)
7626 printk("%c",data[i]);
7627 else
7628 printk(".");
7629 }
7630 printk("\n");
7631
7632 data += linecount;
7633 count -= linecount;
7634 }
7635} /* end of mgsl_trace_block() */
7636
7637/* mgsl_tx_timeout()
7638 *
7639 * called when HDLC frame times out
7640 * update stats and do tx completion processing
7641 *
7642 * Arguments: context pointer to device instance data
7643 * Return Value: None
7644 */
7645static void mgsl_tx_timeout(unsigned long context)
7646{
7647 struct mgsl_struct *info = (struct mgsl_struct*)context;
7648 unsigned long flags;
7649
7650 if ( debug_level >= DEBUG_LEVEL_INFO )
7651 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7652 __FILE__,__LINE__,info->device_name);
7653 if(info->tx_active &&
7654 (info->params.mode == MGSL_MODE_HDLC ||
7655 info->params.mode == MGSL_MODE_RAW) ) {
7656 info->icount.txtimeout++;
7657 }
7658 spin_lock_irqsave(&info->irq_spinlock,flags);
7659 info->tx_active = 0;
7660 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7661
7662 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7663 usc_loopmode_cancel_transmit( info );
7664
7665 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7666
7667#ifdef CONFIG_HDLC
7668 if (info->netcount)
7669 hdlcdev_tx_done(info);
7670 else
7671#endif
7672 mgsl_bh_transmit(info);
7673
7674} /* end of mgsl_tx_timeout() */
7675
7676/* signal that there are no more frames to send, so that
7677  * the line is 'released' by echoing RxD to TxD when the current
7678  * transmission is complete (or immediately if no tx is in progress).
7679 */
7680static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7681{
7682 unsigned long flags;
7683
7684 spin_lock_irqsave(&info->irq_spinlock,flags);
7685 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7686 if (info->tx_active)
7687 info->loopmode_send_done_requested = TRUE;
7688 else
7689 usc_loopmode_send_done(info);
7690 }
7691 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7692
7693 return 0;
7694}
7695
7696/* release the line by echoing RxD to TxD
7697 * upon completion of a transmit frame
7698 */
7699static void usc_loopmode_send_done( struct mgsl_struct * info )
7700{
7701 info->loopmode_send_done_requested = FALSE;
7702 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7703 info->cmr_value &= ~BIT13;
7704 usc_OutReg(info, CMR, info->cmr_value);
7705}
7706
7707/* abort a transmit in progress while in HDLC LoopMode
7708 */
7709static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7710{
7711 /* reset tx dma channel and purge TxFifo */
7712 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7713 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7714 usc_loopmode_send_done( info );
7715}
7716
7717/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7718 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7719 * we must clear CMR:13 to begin repeating TxData to RxData
7720 */
7721static void usc_loopmode_insert_request( struct mgsl_struct * info )
7722{
7723 info->loopmode_insert_requested = TRUE;
7724
7725 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7726 * begin repeating TxData on RxData (complete insertion)
7727 */
7728 usc_OutReg( info, RICR,
7729 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7730
7731 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7732 info->cmr_value |= BIT13;
7733 usc_OutReg(info, CMR, info->cmr_value);
7734}
7735
7736/* return 1 if station is inserted into the loop, otherwise 0
7737 */
7738static int usc_loopmode_active( struct mgsl_struct * info)
7739{
7740 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7741}
7742
7743#ifdef CONFIG_HDLC
7744
7745/**
7746 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7747 * set encoding and frame check sequence (FCS) options
7748 *
7749 * dev pointer to network device structure
7750 * encoding serial encoding setting
7751 * parity FCS setting
7752 *
7753 * returns 0 if success, otherwise error code
7754 */
7755static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7756 unsigned short parity)
7757{
7758 struct mgsl_struct *info = dev_to_port(dev);
7759 unsigned char new_encoding;
7760 unsigned short new_crctype;
7761
7762 /* return error if TTY interface open */
7763 if (info->count)
7764 return -EBUSY;
7765
7766 switch (encoding)
7767 {
7768 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7769 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7770 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7771 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7772 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7773 default: return -EINVAL;
7774 }
7775
7776 switch (parity)
7777 {
7778 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7779 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7780 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7781 default: return -EINVAL;
7782 }
7783
7784 info->params.encoding = new_encoding;
7785 	info->params.crc_type = new_crctype;
7786
7787 /* if network interface up, reprogram hardware */
7788 if (info->netcount)
7789 mgsl_program_hw(info);
7790
7791 return 0;
7792}
7793
7794/**
7795 * called by generic HDLC layer to send frame
7796 *
7797 * skb socket buffer containing HDLC frame
7798 * dev pointer to network device structure
7799 *
7800 * returns 0 if success, otherwise error code
7801 */
7802static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7803{
7804 struct mgsl_struct *info = dev_to_port(dev);
7805 struct net_device_stats *stats = hdlc_stats(dev);
7806 unsigned long flags;
7807
7808 if (debug_level >= DEBUG_LEVEL_INFO)
7809 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7810
7811 /* stop sending until this frame completes */
7812 netif_stop_queue(dev);
7813
7814 /* copy data to device buffers */
7815 info->xmit_cnt = skb->len;
7816 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7817
7818 /* update network statistics */
7819 stats->tx_packets++;
7820 stats->tx_bytes += skb->len;
7821
7822 /* done with socket buffer, so free it */
7823 dev_kfree_skb(skb);
7824
7825 /* save start time for transmit timeout detection */
7826 dev->trans_start = jiffies;
7827
7828 /* start hardware transmitter if necessary */
7829 spin_lock_irqsave(&info->irq_spinlock,flags);
7830 if (!info->tx_active)
7831 usc_start_transmitter(info);
7832 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7833
7834 return 0;
7835}
7836
7837/**
7838 * called by network layer when interface enabled
7839 * claim resources and initialize hardware
7840 *
7841 * dev pointer to network device structure
7842 *
7843 * returns 0 if success, otherwise error code
7844 */
7845static int hdlcdev_open(struct net_device *dev)
7846{
7847 struct mgsl_struct *info = dev_to_port(dev);
7848 int rc;
7849 unsigned long flags;
7850
7851 if (debug_level >= DEBUG_LEVEL_INFO)
7852 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7853
7854 /* generic HDLC layer open processing */
7855 if ((rc = hdlc_open(dev)))
7856 return rc;
7857
7858 /* arbitrate between network and tty opens */
7859 spin_lock_irqsave(&info->netlock, flags);
7860 if (info->count != 0 || info->netcount != 0) {
7861 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7862 spin_unlock_irqrestore(&info->netlock, flags);
7863 return -EBUSY;
7864 }
7865 info->netcount=1;
7866 spin_unlock_irqrestore(&info->netlock, flags);
7867
7868 /* claim resources and init adapter */
7869 if ((rc = startup(info)) != 0) {
7870 spin_lock_irqsave(&info->netlock, flags);
7871 info->netcount=0;
7872 spin_unlock_irqrestore(&info->netlock, flags);
7873 return rc;
7874 }
7875
7876 /* assert DTR and RTS, apply hardware settings */
7877 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7878 mgsl_program_hw(info);
7879
7880 /* enable network layer transmit */
7881 dev->trans_start = jiffies;
7882 netif_start_queue(dev);
7883
7884 /* inform generic HDLC layer of current DCD status */
7885 spin_lock_irqsave(&info->irq_spinlock, flags);
7886 usc_get_serial_signals(info);
7887 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7888 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7889
7890 return 0;
7891}
7892
7893/**
7894 * called by network layer when interface is disabled
7895 * shutdown hardware and release resources
7896 *
7897 * dev pointer to network device structure
7898 *
7899 * returns 0 if success, otherwise error code
7900 */
7901static int hdlcdev_close(struct net_device *dev)
7902{
7903 struct mgsl_struct *info = dev_to_port(dev);
7904 unsigned long flags;
7905
7906 if (debug_level >= DEBUG_LEVEL_INFO)
7907 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7908
7909 netif_stop_queue(dev);
7910
7911 /* shutdown adapter and release resources */
7912 shutdown(info);
7913
7914 hdlc_close(dev);
7915
7916 spin_lock_irqsave(&info->netlock, flags);
7917 info->netcount=0;
7918 spin_unlock_irqrestore(&info->netlock, flags);
7919
7920 return 0;
7921}
7922
7923/**
7924 * called by network layer to process IOCTL call to network device
7925 *
7926 * dev pointer to network device structure
7927 * ifr pointer to network interface request structure
7928 * cmd IOCTL command code
7929 *
7930 * returns 0 if success, otherwise error code
7931 */
7932static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7933{
7934 const size_t size = sizeof(sync_serial_settings);
7935 sync_serial_settings new_line;
7936 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7937 struct mgsl_struct *info = dev_to_port(dev);
7938 unsigned int flags;
7939
7940 if (debug_level >= DEBUG_LEVEL_INFO)
7941 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7942
7943 /* return error if TTY interface open */
7944 if (info->count)
7945 return -EBUSY;
7946
7947 if (cmd != SIOCWANDEV)
7948 return hdlc_ioctl(dev, ifr, cmd);
7949
7950 switch(ifr->ifr_settings.type) {
7951 case IF_GET_IFACE: /* return current sync_serial_settings */
7952
7953 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7954 if (ifr->ifr_settings.size < size) {
7955 ifr->ifr_settings.size = size; /* data size wanted */
7956 return -ENOBUFS;
7957 }
7958
7959 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7960 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7961 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7962 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7963
7964 switch (flags){
7965 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7966 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7967 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7968 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7969 default: new_line.clock_type = CLOCK_DEFAULT;
7970 }
7971
7972 new_line.clock_rate = info->params.clock_speed;
7973 new_line.loopback = info->params.loopback ? 1:0;
7974
7975 if (copy_to_user(line, &new_line, size))
7976 return -EFAULT;
7977 return 0;
7978
7979 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7980
7981 if(!capable(CAP_NET_ADMIN))
7982 return -EPERM;
7983 if (copy_from_user(&new_line, line, size))
7984 return -EFAULT;
7985
7986 switch (new_line.clock_type)
7987 {
7988 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7989 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7990 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7991 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7992 case CLOCK_DEFAULT: flags = info->params.flags &
7993 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7994 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7995 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7996 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7997 default: return -EINVAL;
7998 }
7999
8000 if (new_line.loopback != 0 && new_line.loopback != 1)
8001 return -EINVAL;
8002
8003 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
8004 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
8005 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
8006 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
8007 info->params.flags |= flags;
8008
8009 info->params.loopback = new_line.loopback;
8010
8011 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
8012 info->params.clock_speed = new_line.clock_rate;
8013 else
8014 info->params.clock_speed = 0;
8015
8016 /* if network interface up, reprogram hardware */
8017 if (info->netcount)
8018 mgsl_program_hw(info);
8019 return 0;
8020
8021 default:
8022 return hdlc_ioctl(dev, ifr, cmd);
8023 }
8024}
8025
8026/**
8027 * called by network layer when transmit timeout is detected
8028 *
8029 * dev pointer to network device structure
8030 */
8031static void hdlcdev_tx_timeout(struct net_device *dev)
8032{
8033 struct mgsl_struct *info = dev_to_port(dev);
8034 struct net_device_stats *stats = hdlc_stats(dev);
8035 unsigned long flags;
8036
8037 if (debug_level >= DEBUG_LEVEL_INFO)
8038 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8039
8040 stats->tx_errors++;
8041 stats->tx_aborted_errors++;
8042
8043 spin_lock_irqsave(&info->irq_spinlock,flags);
8044 usc_stop_transmitter(info);
8045 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8046
8047 netif_wake_queue(dev);
8048}
8049
8050/**
8051 * called by device driver when transmit completes
8052 * reenable network layer transmit if stopped
8053 *
8054 * info pointer to device instance information
8055 */
8056static void hdlcdev_tx_done(struct mgsl_struct *info)
8057{
8058 if (netif_queue_stopped(info->netdev))
8059 netif_wake_queue(info->netdev);
8060}
8061
8062/**
8063 * called by device driver when frame received
8064 * pass frame to network layer
8065 *
8066 * info pointer to device instance information
8067  * buf        pointer to buffer containing frame data
8068 * size count of data bytes in buf
8069 */
8070static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8071{
8072 struct sk_buff *skb = dev_alloc_skb(size);
8073 struct net_device *dev = info->netdev;
8074 struct net_device_stats *stats = hdlc_stats(dev);
8075
8076 if (debug_level >= DEBUG_LEVEL_INFO)
8077 printk("hdlcdev_rx(%s)\n",dev->name);
8078
8079 if (skb == NULL) {
8080 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8081 stats->rx_dropped++;
8082 return;
8083 }
8084
8085 memcpy(skb_put(skb, size),buf,size);
8086
8087 skb->protocol = hdlc_type_trans(skb, info->netdev);
8088
8089 stats->rx_packets++;
8090 stats->rx_bytes += size;
8091
8092 netif_rx(skb);
8093
8094 info->netdev->last_rx = jiffies;
8095}
8096
8097/**
8098 * called by device driver when adding device instance
8099 * do generic HDLC initialization
8100 *
8101 * info pointer to device instance information
8102 *
8103 * returns 0 if success, otherwise error code
8104 */
8105static int hdlcdev_init(struct mgsl_struct *info)
8106{
8107 int rc;
8108 struct net_device *dev;
8109 hdlc_device *hdlc;
8110
8111 /* allocate and initialize network and HDLC layer objects */
8112
8113 if (!(dev = alloc_hdlcdev(info))) {
8114 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8115 return -ENOMEM;
8116 }
8117
8118 /* for network layer reporting purposes only */
8119 dev->base_addr = info->io_base;
8120 dev->irq = info->irq_level;
8121 dev->dma = info->dma_level;
8122
8123 /* network layer callbacks and settings */
8124 dev->do_ioctl = hdlcdev_ioctl;
8125 dev->open = hdlcdev_open;
8126 dev->stop = hdlcdev_close;
8127 dev->tx_timeout = hdlcdev_tx_timeout;
8128 dev->watchdog_timeo = 10*HZ;
8129 dev->tx_queue_len = 50;
8130
8131 /* generic HDLC layer callbacks and settings */
8132 hdlc = dev_to_hdlc(dev);
8133 hdlc->attach = hdlcdev_attach;
8134 hdlc->xmit = hdlcdev_xmit;
8135
8136 /* register objects with HDLC layer */
8137 if ((rc = register_hdlc_device(dev))) {
8138 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8139 free_netdev(dev);
8140 return rc;
8141 }
8142
8143 info->netdev = dev;
8144 return 0;
8145}
8146
8147/**
8148 * called by device driver when removing device instance
8149 * do generic HDLC cleanup
8150 *
8151 * info pointer to device instance information
8152 */
8153static void hdlcdev_exit(struct mgsl_struct *info)
8154{
8155 unregister_hdlc_device(info->netdev);
8156 free_netdev(info->netdev);
8157 info->netdev = NULL;
8158}
8159
8160#endif /* CONFIG_HDLC */
8161
8162
8163static int __devinit synclink_init_one (struct pci_dev *dev,
8164 const struct pci_device_id *ent)
8165{
8166 struct mgsl_struct *info;
8167
8168 if (pci_enable_device(dev)) {
8169 printk("error enabling pci device %p\n", dev);
8170 return -EIO;
8171 }
8172
8173 if (!(info = mgsl_allocate_device())) {
8174 printk("can't allocate device instance data.\n");
8175 return -EIO;
8176 }
8177
8178 /* Copy user configuration info to device instance data */
8179
8180 info->io_base = pci_resource_start(dev, 2);
8181 info->irq_level = dev->irq;
8182 info->phys_memory_base = pci_resource_start(dev, 3);
8183
8184 	/* Because ioremap only works on page boundaries, we must map
8185 * a larger area than is actually implemented for the LCR
8186 * memory range. We map a full page starting at the page boundary.
8187 */
8188 info->phys_lcr_base = pci_resource_start(dev, 0);
8189 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8190 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8191
8192 info->bus_type = MGSL_BUS_TYPE_PCI;
8193 info->io_addr_size = 8;
8194 info->irq_flags = SA_SHIRQ;
8195
8196 if (dev->device == 0x0210) {
8197 /* Version 1 PCI9030 based universal PCI adapter */
8198 info->misc_ctrl_value = 0x007c4080;
8199 info->hw_version = 1;
8200 } else {
8201 /* Version 0 PCI9050 based 5V PCI adapter
8202 * A PCI9050 bug prevents reading LCR registers if
8203 * LCR base address bit 7 is set. Maintain shadow
8204 * value so we can write to LCR misc control reg.
8205 */
8206 info->misc_ctrl_value = 0x087e4546;
8207 info->hw_version = 0;
8208 }
8209
8210 mgsl_add_device(info);
8211
8212 return 0;
8213}
8214
8215static void __devexit synclink_remove_one (struct pci_dev *dev)
8216{
8217}
8218