  1/*
2 * linux/drivers/char/synclink.c
3 *
  4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
  5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
 26 * mgsl_flush_chars or mgsl_write is called; see the usage sketch below.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
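
/*
 * Usage sketch (illustrative, not part of the driver): in synchronous
 * HDLC mode an application passes exactly one complete frame to each
 * write(); received data is handed to the line discipline one complete
 * frame at a time. The device node name and the build_frame() helper
 * are assumptions made only for this example.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY);  // assumed node name
 *	unsigned char frame[1024];
 *	int len = build_frame(frame);            // hypothetical helper
 *	write(fd, frame, len);                   // one complete HDLC frame out
 *	len = read(fd, frame, sizeof(frame));    // read back receive data
 *	close(fd);
 */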
55
56#if defined(__i386__)
57# define BREAKPOINT() asm(" int $3");
58#else
59# define BREAKPOINT() { }
60#endif
61
62#define MAX_ISA_DEVICES 10
63#define MAX_PCI_DEVICES 10
64#define MAX_TOTAL_DEVICES 20
65
66#include <linux/config.h>
67#include <linux/module.h>
68#include <linux/errno.h>
69#include <linux/signal.h>
70#include <linux/sched.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/pci.h>
74#include <linux/tty.h>
75#include <linux/tty_flip.h>
76#include <linux/serial.h>
77#include <linux/major.h>
78#include <linux/string.h>
79#include <linux/fcntl.h>
80#include <linux/ptrace.h>
81#include <linux/ioport.h>
82#include <linux/mm.h>
83#include <linux/slab.h>
84#include <linux/delay.h>
85
86#include <linux/netdevice.h>
87
88#include <linux/vmalloc.h>
89#include <linux/init.h>
90#include <asm/serial.h>
91
92#include <linux/delay.h>
93#include <linux/ioctl.h>
94
95#include <asm/system.h>
96#include <asm/io.h>
97#include <asm/irq.h>
98#include <asm/dma.h>
99#include <linux/bitops.h>
100#include <asm/types.h>
101#include <linux/termios.h>
102#include <linux/workqueue.h>
103#include <linux/hdlc.h>
104#include <linux/dma-mapping.h>
105
106#ifdef CONFIG_HDLC_MODULE
107#define CONFIG_HDLC 1
108#endif
109
110#define GET_USER(error,value,addr) error = get_user(value,addr)
111#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
112#define PUT_USER(error,value,addr) error = put_user(value,addr)
113#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
114
115#include <asm/uaccess.h>
116
117#include "linux/synclink.h"
118
119#define RCLRVALUE 0xffff
120
121static MGSL_PARAMS default_params = {
122 MGSL_MODE_HDLC, /* unsigned long mode */
123 0, /* unsigned char loopback; */
124 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
125 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
126 0, /* unsigned long clock_speed; */
127 0xff, /* unsigned char addr_filter; */
128 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
129 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
130 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
131 9600, /* unsigned long data_rate; */
132 8, /* unsigned char data_bits; */
133 1, /* unsigned char stop_bits; */
134 ASYNC_PARITY_NONE /* unsigned char parity; */
135};
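
/*
 * A minimal sketch of overriding these defaults from user space, assuming
 * the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls declared in linux/synclink.h
 * and an already-open descriptor fd; the values shown are examples only.
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// start from current settings
 *	params.encoding = HDLC_ENCODING_NRZ;	// instead of the NRZI-space default
 *	params.clock_speed = 64000;		// assumed example rate
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);
 */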
136
137#define SHARED_MEM_ADDRESS_SIZE 0x40000
138#define BUFFERLISTSIZE (PAGE_SIZE)
139#define DMABUFFERSIZE (PAGE_SIZE)
140#define MAXRXFRAMES 7
141
142typedef struct _DMABUFFERENTRY
143{
144 u32 phys_addr; /* 32-bit flat physical address of data buffer */
145	volatile u16 count;	/* buffer size/data count */
146 volatile u16 status; /* Control/status field */
147 volatile u16 rcc; /* character count field */
148	u16 reserved;	/* padding required by 16C32 */
149 u32 link; /* 32-bit flat link to next buffer entry */
150 char *virt_addr; /* virtual address of data buffer */
151 u32 phys_entry; /* physical address of this buffer entry */
152	dma_addr_t dma_addr;
153} DMABUFFERENTRY, *DMAPBUFFERENTRY;
154
155/* The queue of BH actions to be performed */
156
157#define BH_RECEIVE 1
158#define BH_TRANSMIT 2
159#define BH_STATUS 4
160
161#define IO_PIN_SHUTDOWN_LIMIT 100
162
163#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
164
165struct _input_signal_events {
166 int ri_up;
167 int ri_down;
168 int dsr_up;
169 int dsr_down;
170 int dcd_up;
171 int dcd_down;
172 int cts_up;
173 int cts_down;
174};
175
176/* transmit holding buffer definitions*/
177#define MAX_TX_HOLDING_BUFFERS 5
178struct tx_holding_buffer {
179 int buffer_size;
180 unsigned char * buffer;
181};
182
183
184/*
185 * Device instance data structure
186 */
187
188struct mgsl_struct {
189 int magic;
190 int flags;
191 int count; /* count of opens */
192 int line;
193 int hw_version;
194 unsigned short close_delay;
195 unsigned short closing_wait; /* time to wait before closing */
196
197 struct mgsl_icount icount;
198
199 struct tty_struct *tty;
200 int timeout;
201 int x_char; /* xon/xoff character */
202 int blocked_open; /* # of blocked opens */
203 u16 read_status_mask;
204 u16 ignore_status_mask;
205 unsigned char *xmit_buf;
206 int xmit_head;
207 int xmit_tail;
208 int xmit_cnt;
209
210 wait_queue_head_t open_wait;
211 wait_queue_head_t close_wait;
212
213 wait_queue_head_t status_event_wait_q;
214 wait_queue_head_t event_wait_q;
215 struct timer_list tx_timer; /* HDLC transmit timeout timer */
216 struct mgsl_struct *next_device; /* device list link */
217
218 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
219 struct work_struct task; /* task structure for scheduling bh */
220
221 u32 EventMask; /* event trigger mask */
222 u32 RecordedEvents; /* pending events */
223
224 u32 max_frame_size; /* as set by device config */
225
226 u32 pending_bh;
227
228 int bh_running; /* Protection from multiple */
229 int isr_overflow;
230 int bh_requested;
231
232 int dcd_chkcount; /* check counts to prevent */
233 int cts_chkcount; /* too many IRQs if a signal */
234 int dsr_chkcount; /* is floating */
235 int ri_chkcount;
236
237 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
238	u32 buffer_list_phys;
239 dma_addr_t buffer_list_dma_addr;
240
241 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
242 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
243 unsigned int current_rx_buffer;
244
245 int num_tx_dma_buffers; /* number of tx dma frames required */
246 int tx_dma_buffers_used;
247 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
248 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
249 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
250 int current_tx_buffer; /* next tx dma buffer to be loaded */
251
252 unsigned char *intermediate_rxbuffer;
253
 254	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
255 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
256 int put_tx_holding_index; /* next tx holding buffer to store user request */
257 int tx_holding_count; /* number of tx holding buffers waiting */
258 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
259
260 int rx_enabled;
261 int rx_overflow;
262 int rx_rcc_underrun;
263
264 int tx_enabled;
265 int tx_active;
266 u32 idle_mode;
267
268 u16 cmr_value;
269 u16 tcsr_value;
270
271 char device_name[25]; /* device instance name */
272
273 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
274 unsigned char bus; /* expansion bus number (zero based) */
275 unsigned char function; /* PCI device number */
276
277 unsigned int io_base; /* base I/O address of adapter */
278 unsigned int io_addr_size; /* size of the I/O address range */
279 int io_addr_requested; /* nonzero if I/O address requested */
280
281 unsigned int irq_level; /* interrupt level */
282 unsigned long irq_flags;
283 int irq_requested; /* nonzero if IRQ requested */
284
285 unsigned int dma_level; /* DMA channel */
286 int dma_requested; /* nonzero if dma channel requested */
287
288 u16 mbre_bit;
289 u16 loopback_bits;
290 u16 usc_idle_mode;
291
292 MGSL_PARAMS params; /* communications parameters */
293
294 unsigned char serial_signals; /* current serial signal states */
295
296 int irq_occurred; /* for diagnostics use */
297 unsigned int init_error; /* Initialization startup error (DIAGS) */
298 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
299
300 u32 last_mem_alloc;
301 unsigned char* memory_base; /* shared memory address (PCI only) */
302 u32 phys_memory_base;
303 int shared_mem_requested;
304
305 unsigned char* lcr_base; /* local config registers (PCI only) */
306 u32 phys_lcr_base;
307 u32 lcr_offset;
308 int lcr_mem_requested;
309
310 u32 misc_ctrl_value;
311 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
312 char char_buf[MAX_ASYNC_BUFFER_SIZE];
313 BOOLEAN drop_rts_on_tx_done;
314
315 BOOLEAN loopmode_insert_requested;
316 BOOLEAN loopmode_send_done_requested;
317
318 struct _input_signal_events input_signal_events;
319
320 /* generic HDLC device parts */
321 int netcount;
322 int dosyncppp;
323 spinlock_t netlock;
324
325#ifdef CONFIG_HDLC
326 struct net_device *netdev;
327#endif
328};
329
330#define MGSL_MAGIC 0x5401
331
332/*
333 * The size of the serial xmit buffer is 1 page, or 4096 bytes
334 */
335#ifndef SERIAL_XMIT_SIZE
336#define SERIAL_XMIT_SIZE 4096
337#endif
338
339/*
340 * These macros define the offsets used in calculating the
341 * I/O address of the specified USC registers.
342 */
343
344
345#define DCPIN 2 /* Bit 1 of I/O address */
346#define SDPIN 4 /* Bit 2 of I/O address */
347
348#define DCAR 0 /* DMA command/address register */
349#define CCAR SDPIN /* channel command/address register */
 350#define DATAREG (DCPIN + SDPIN)	/* serial data register */
351#define MSBONLY 0x41
352#define LSBONLY 0x40
353
354/*
355 * These macros define the register address (ordinal number)
356 * used for writing address/value pairs to the USC.
357 */
358
359#define CMR 0x02 /* Channel mode Register */
360#define CCSR 0x04 /* Channel Command/status Register */
361#define CCR 0x06 /* Channel Control Register */
362#define PSR 0x08 /* Port status Register */
363#define PCR 0x0a /* Port Control Register */
364#define TMDR 0x0c /* Test mode Data Register */
365#define TMCR 0x0e /* Test mode Control Register */
366#define CMCR 0x10 /* Clock mode Control Register */
367#define HCR 0x12 /* Hardware Configuration Register */
368#define IVR 0x14 /* Interrupt Vector Register */
369#define IOCR 0x16 /* Input/Output Control Register */
370#define ICR 0x18 /* Interrupt Control Register */
371#define DCCR 0x1a /* Daisy Chain Control Register */
372#define MISR 0x1c /* Misc Interrupt status Register */
373#define SICR 0x1e /* status Interrupt Control Register */
374#define RDR 0x20 /* Receive Data Register */
375#define RMR 0x22 /* Receive mode Register */
376#define RCSR 0x24 /* Receive Command/status Register */
377#define RICR 0x26 /* Receive Interrupt Control Register */
378#define RSR 0x28 /* Receive Sync Register */
379#define RCLR 0x2a /* Receive count Limit Register */
380#define RCCR 0x2c /* Receive Character count Register */
381#define TC0R 0x2e /* Time Constant 0 Register */
382#define TDR 0x30 /* Transmit Data Register */
383#define TMR 0x32 /* Transmit mode Register */
384#define TCSR 0x34 /* Transmit Command/status Register */
385#define TICR 0x36 /* Transmit Interrupt Control Register */
386#define TSR 0x38 /* Transmit Sync Register */
387#define TCLR 0x3a /* Transmit count Limit Register */
388#define TCCR 0x3c /* Transmit Character count Register */
389#define TC1R 0x3e /* Time Constant 1 Register */
390
391
392/*
393 * MACRO DEFINITIONS FOR DMA REGISTERS
394 */
395
396#define DCR 0x06 /* DMA Control Register (shared) */
397#define DACR 0x08 /* DMA Array count Register (shared) */
398#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
399#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
400#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
401#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
402#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
403
404#define TDMR 0x02 /* Transmit DMA mode Register */
405#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
406#define TBCR 0x2a /* Transmit Byte count Register */
407#define TARL 0x2c /* Transmit Address Register (low) */
408#define TARU 0x2e /* Transmit Address Register (high) */
409#define NTBCR 0x3a /* Next Transmit Byte count Register */
410#define NTARL 0x3c /* Next Transmit Address Register (low) */
411#define NTARU 0x3e /* Next Transmit Address Register (high) */
412
413#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
414#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
415#define RBCR 0xaa /* Receive Byte count Register */
416#define RARL 0xac /* Receive Address Register (low) */
417#define RARU 0xae /* Receive Address Register (high) */
418#define NRBCR 0xba /* Next Receive Byte count Register */
419#define NRARL 0xbc /* Next Receive Address Register (low) */
420#define NRARU 0xbe /* Next Receive Address Register (high) */
421
422
423/*
424 * MACRO DEFINITIONS FOR MODEM STATUS BITS
425 */
426
427#define MODEMSTATUS_DTR 0x80
428#define MODEMSTATUS_DSR 0x40
429#define MODEMSTATUS_RTS 0x20
430#define MODEMSTATUS_CTS 0x10
431#define MODEMSTATUS_RI 0x04
432#define MODEMSTATUS_DCD 0x01
433
434
435/*
436 * Channel Command/Address Register (CCAR) Command Codes
437 */
438
439#define RTCmd_Null 0x0000
440#define RTCmd_ResetHighestIus 0x1000
441#define RTCmd_TriggerChannelLoadDma 0x2000
442#define RTCmd_TriggerRxDma 0x2800
443#define RTCmd_TriggerTxDma 0x3000
444#define RTCmd_TriggerRxAndTxDma 0x3800
445#define RTCmd_PurgeRxFifo 0x4800
446#define RTCmd_PurgeTxFifo 0x5000
447#define RTCmd_PurgeRxAndTxFifo 0x5800
448#define RTCmd_LoadRcc 0x6800
449#define RTCmd_LoadTcc 0x7000
450#define RTCmd_LoadRccAndTcc 0x7800
451#define RTCmd_LoadTC0 0x8800
452#define RTCmd_LoadTC1 0x9000
453#define RTCmd_LoadTC0AndTC1 0x9800
454#define RTCmd_SerialDataLSBFirst 0xa000
455#define RTCmd_SerialDataMSBFirst 0xa800
456#define RTCmd_SelectBigEndian 0xb000
457#define RTCmd_SelectLittleEndian 0xb800
458
459
460/*
461 * DMA Command/Address Register (DCAR) Command Codes
462 */
463
464#define DmaCmd_Null 0x0000
465#define DmaCmd_ResetTxChannel 0x1000
466#define DmaCmd_ResetRxChannel 0x1200
467#define DmaCmd_StartTxChannel 0x2000
468#define DmaCmd_StartRxChannel 0x2200
469#define DmaCmd_ContinueTxChannel 0x3000
470#define DmaCmd_ContinueRxChannel 0x3200
471#define DmaCmd_PauseTxChannel 0x4000
472#define DmaCmd_PauseRxChannel 0x4200
473#define DmaCmd_AbortTxChannel 0x5000
474#define DmaCmd_AbortRxChannel 0x5200
475#define DmaCmd_InitTxChannel 0x7000
476#define DmaCmd_InitRxChannel 0x7200
477#define DmaCmd_ResetHighestDmaIus 0x8000
478#define DmaCmd_ResetAllChannels 0x9000
479#define DmaCmd_StartAllChannels 0xa000
480#define DmaCmd_ContinueAllChannels 0xb000
481#define DmaCmd_PauseAllChannels 0xc000
482#define DmaCmd_AbortAllChannels 0xd000
483#define DmaCmd_InitAllChannels 0xf000
484
485#define TCmd_Null 0x0000
486#define TCmd_ClearTxCRC 0x2000
487#define TCmd_SelectTicrTtsaData 0x4000
488#define TCmd_SelectTicrTxFifostatus 0x5000
489#define TCmd_SelectTicrIntLevel 0x6000
490#define TCmd_SelectTicrdma_level 0x7000
491#define TCmd_SendFrame 0x8000
492#define TCmd_SendAbort 0x9000
493#define TCmd_EnableDleInsertion 0xc000
494#define TCmd_DisableDleInsertion 0xd000
495#define TCmd_ClearEofEom 0xe000
496#define TCmd_SetEofEom 0xf000
497
498#define RCmd_Null 0x0000
499#define RCmd_ClearRxCRC 0x2000
500#define RCmd_EnterHuntmode 0x3000
501#define RCmd_SelectRicrRtsaData 0x4000
502#define RCmd_SelectRicrRxFifostatus 0x5000
503#define RCmd_SelectRicrIntLevel 0x6000
504#define RCmd_SelectRicrdma_level 0x7000
505
506/*
507 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
508 */
509
510#define RECEIVE_STATUS BIT5
511#define RECEIVE_DATA BIT4
512#define TRANSMIT_STATUS BIT3
513#define TRANSMIT_DATA BIT2
514#define IO_PIN BIT1
515#define MISC BIT0
516
517
518/*
519 * Receive status Bits in Receive Command/status Register RCSR
520 */
521
522#define RXSTATUS_SHORT_FRAME BIT8
523#define RXSTATUS_CODE_VIOLATION BIT8
524#define RXSTATUS_EXITED_HUNT BIT7
525#define RXSTATUS_IDLE_RECEIVED BIT6
526#define RXSTATUS_BREAK_RECEIVED BIT5
527#define RXSTATUS_ABORT_RECEIVED BIT5
528#define RXSTATUS_RXBOUND BIT4
529#define RXSTATUS_CRC_ERROR BIT3
530#define RXSTATUS_FRAMING_ERROR BIT3
531#define RXSTATUS_ABORT BIT2
532#define RXSTATUS_PARITY_ERROR BIT2
533#define RXSTATUS_OVERRUN BIT1
534#define RXSTATUS_DATA_AVAILABLE BIT0
535#define RXSTATUS_ALL 0x01f6
536#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
537
538/*
539 * Values for setting transmit idle mode in
540 * Transmit Control/status Register (TCSR)
541 */
542#define IDLEMODE_FLAGS 0x0000
543#define IDLEMODE_ALT_ONE_ZERO 0x0100
544#define IDLEMODE_ZERO 0x0200
545#define IDLEMODE_ONE 0x0300
546#define IDLEMODE_ALT_MARK_SPACE 0x0500
547#define IDLEMODE_SPACE 0x0600
548#define IDLEMODE_MARK 0x0700
549#define IDLEMODE_MASK 0x0700
550
551/*
552 * IUSC revision identifiers
553 */
554#define IUSC_SL1660 0x4d44
555#define IUSC_PRE_SL1660 0x4553
556
557/*
558 * Transmit status Bits in Transmit Command/status Register (TCSR)
559 */
560
561#define TCSR_PRESERVE 0x0F00
562
563#define TCSR_UNDERWAIT BIT11
564#define TXSTATUS_PREAMBLE_SENT BIT7
565#define TXSTATUS_IDLE_SENT BIT6
566#define TXSTATUS_ABORT_SENT BIT5
567#define TXSTATUS_EOF_SENT BIT4
568#define TXSTATUS_EOM_SENT BIT4
569#define TXSTATUS_CRC_SENT BIT3
570#define TXSTATUS_ALL_SENT BIT2
571#define TXSTATUS_UNDERRUN BIT1
572#define TXSTATUS_FIFO_EMPTY BIT0
573#define TXSTATUS_ALL 0x00fa
574#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
575
576
577#define MISCSTATUS_RXC_LATCHED BIT15
578#define MISCSTATUS_RXC BIT14
579#define MISCSTATUS_TXC_LATCHED BIT13
580#define MISCSTATUS_TXC BIT12
581#define MISCSTATUS_RI_LATCHED BIT11
582#define MISCSTATUS_RI BIT10
583#define MISCSTATUS_DSR_LATCHED BIT9
584#define MISCSTATUS_DSR BIT8
585#define MISCSTATUS_DCD_LATCHED BIT7
586#define MISCSTATUS_DCD BIT6
587#define MISCSTATUS_CTS_LATCHED BIT5
588#define MISCSTATUS_CTS BIT4
589#define MISCSTATUS_RCC_UNDERRUN BIT3
590#define MISCSTATUS_DPLL_NO_SYNC BIT2
591#define MISCSTATUS_BRG1_ZERO BIT1
592#define MISCSTATUS_BRG0_ZERO BIT0
593
594#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
595#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
596
597#define SICR_RXC_ACTIVE BIT15
598#define SICR_RXC_INACTIVE BIT14
599#define SICR_RXC (BIT15+BIT14)
600#define SICR_TXC_ACTIVE BIT13
601#define SICR_TXC_INACTIVE BIT12
602#define SICR_TXC (BIT13+BIT12)
603#define SICR_RI_ACTIVE BIT11
604#define SICR_RI_INACTIVE BIT10
605#define SICR_RI (BIT11+BIT10)
606#define SICR_DSR_ACTIVE BIT9
607#define SICR_DSR_INACTIVE BIT8
608#define SICR_DSR (BIT9+BIT8)
609#define SICR_DCD_ACTIVE BIT7
610#define SICR_DCD_INACTIVE BIT6
611#define SICR_DCD (BIT7+BIT6)
612#define SICR_CTS_ACTIVE BIT5
613#define SICR_CTS_INACTIVE BIT4
614#define SICR_CTS (BIT5+BIT4)
615#define SICR_RCC_UNDERFLOW BIT3
616#define SICR_DPLL_NO_SYNC BIT2
617#define SICR_BRG1_ZERO BIT1
618#define SICR_BRG0_ZERO BIT0
619
620void usc_DisableMasterIrqBit( struct mgsl_struct *info );
621void usc_EnableMasterIrqBit( struct mgsl_struct *info );
622void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
623void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
624void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
625
626#define usc_EnableInterrupts( a, b ) \
627 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
628
629#define usc_DisableInterrupts( a, b ) \
630 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
631
632#define usc_EnableMasterIrqBit(a) \
633 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
634
635#define usc_DisableMasterIrqBit(a) \
636 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
637
638#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
639
640/*
641 * Transmit status Bits in Transmit Control status Register (TCSR)
642 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
643 */
644
645#define TXSTATUS_PREAMBLE_SENT BIT7
646#define TXSTATUS_IDLE_SENT BIT6
647#define TXSTATUS_ABORT_SENT BIT5
648#define TXSTATUS_EOF BIT4
649#define TXSTATUS_CRC_SENT BIT3
650#define TXSTATUS_ALL_SENT BIT2
651#define TXSTATUS_UNDERRUN BIT1
652#define TXSTATUS_FIFO_EMPTY BIT0
653
654#define DICR_MASTER BIT15
655#define DICR_TRANSMIT BIT0
656#define DICR_RECEIVE BIT1
657
658#define usc_EnableDmaInterrupts(a,b) \
659 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
660
661#define usc_DisableDmaInterrupts(a,b) \
662 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
663
664#define usc_EnableStatusIrqs(a,b) \
665 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
666
667#define usc_DisablestatusIrqs(a,b) \
668 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
669
670/* Transmit status Bits in Transmit Control status Register (TCSR) */
671/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
672
673
674#define DISABLE_UNCONDITIONAL 0
675#define DISABLE_END_OF_FRAME 1
676#define ENABLE_UNCONDITIONAL 2
677#define ENABLE_AUTO_CTS 3
678#define ENABLE_AUTO_DCD 3
679#define usc_EnableTransmitter(a,b) \
680 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
681#define usc_EnableReceiver(a,b) \
682 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
683
684static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
685static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
686static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
687
688static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
689static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
690static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
691void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
692void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
693
694#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
695#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
696
697#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
698
699static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
700static void usc_start_receiver( struct mgsl_struct *info );
701static void usc_stop_receiver( struct mgsl_struct *info );
702
703static void usc_start_transmitter( struct mgsl_struct *info );
704static void usc_stop_transmitter( struct mgsl_struct *info );
705static void usc_set_txidle( struct mgsl_struct *info );
706static void usc_load_txfifo( struct mgsl_struct *info );
707
708static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
709static void usc_enable_loopback( struct mgsl_struct *info, int enable );
710
711static void usc_get_serial_signals( struct mgsl_struct *info );
712static void usc_set_serial_signals( struct mgsl_struct *info );
713
714static void usc_reset( struct mgsl_struct *info );
715
716static void usc_set_sync_mode( struct mgsl_struct *info );
717static void usc_set_sdlc_mode( struct mgsl_struct *info );
718static void usc_set_async_mode( struct mgsl_struct *info );
719static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
720
721static void usc_loopback_frame( struct mgsl_struct *info );
722
723static void mgsl_tx_timeout(unsigned long context);
724
725
726static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
727static void usc_loopmode_insert_request( struct mgsl_struct * info );
728static int usc_loopmode_active( struct mgsl_struct * info);
729static void usc_loopmode_send_done( struct mgsl_struct * info );
730
731static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
732
733#ifdef CONFIG_HDLC
734#define dev_to_port(D) (dev_to_hdlc(D)->priv)
735static void hdlcdev_tx_done(struct mgsl_struct *info);
736static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
737static int hdlcdev_init(struct mgsl_struct *info);
738static void hdlcdev_exit(struct mgsl_struct *info);
739#endif
740
741/*
742 * Defines a BUS descriptor value for the PCI adapter
743 * local bus address ranges.
744 */
745
746#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
747(0x00400020 + \
748((WrHold) << 30) + \
749((WrDly) << 28) + \
750((RdDly) << 26) + \
751((Nwdd) << 20) + \
752((Nwad) << 15) + \
753((Nxda) << 13) + \
754((Nrdd) << 11) + \
755((Nrad) << 6) )
756
757static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
758
759/*
760 * Adapter diagnostic routines
761 */
762static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
763static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
764static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
765static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
766static int mgsl_adapter_test( struct mgsl_struct *info );
767
768/*
769 * device and resource management routines
770 */
771static int mgsl_claim_resources(struct mgsl_struct *info);
772static void mgsl_release_resources(struct mgsl_struct *info);
773static void mgsl_add_device(struct mgsl_struct *info);
774static struct mgsl_struct* mgsl_allocate_device(void);
775
776/*
 777 * DMA buffer manipulation functions.
778 */
779static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
780static int mgsl_get_rx_frame( struct mgsl_struct *info );
781static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
782static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
783static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
784static int num_free_tx_dma_buffers(struct mgsl_struct *info);
785static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
786static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
787
788/*
789 * DMA and Shared Memory buffer allocation and formatting
790 */
791static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
792static void mgsl_free_dma_buffers(struct mgsl_struct *info);
793static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
794static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
795static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
796static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
797static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
798static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
799static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
800static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
801static int load_next_tx_holding_buffer(struct mgsl_struct *info);
802static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
803
804/*
805 * Bottom half interrupt handlers
806 */
807static void mgsl_bh_handler(void* Context);
808static void mgsl_bh_receive(struct mgsl_struct *info);
809static void mgsl_bh_transmit(struct mgsl_struct *info);
810static void mgsl_bh_status(struct mgsl_struct *info);
811
812/*
813 * Interrupt handler routines and dispatch table.
814 */
815static void mgsl_isr_null( struct mgsl_struct *info );
816static void mgsl_isr_transmit_data( struct mgsl_struct *info );
817static void mgsl_isr_receive_data( struct mgsl_struct *info );
818static void mgsl_isr_receive_status( struct mgsl_struct *info );
819static void mgsl_isr_transmit_status( struct mgsl_struct *info );
820static void mgsl_isr_io_pin( struct mgsl_struct *info );
821static void mgsl_isr_misc( struct mgsl_struct *info );
822static void mgsl_isr_receive_dma( struct mgsl_struct *info );
823static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
824
825typedef void (*isr_dispatch_func)(struct mgsl_struct *);
826
827static isr_dispatch_func UscIsrTable[7] =
828{
829 mgsl_isr_null,
830 mgsl_isr_misc,
831 mgsl_isr_io_pin,
832 mgsl_isr_transmit_data,
833 mgsl_isr_transmit_status,
834 mgsl_isr_receive_data,
835 mgsl_isr_receive_status
836};
837
838/*
839 * ioctl call handlers
840 */
841static int tiocmget(struct tty_struct *tty, struct file *file);
842static int tiocmset(struct tty_struct *tty, struct file *file,
843 unsigned int set, unsigned int clear);
844static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
845 __user *user_icount);
846static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
847static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
848static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
849static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
850static int mgsl_txenable(struct mgsl_struct * info, int enable);
851static int mgsl_txabort(struct mgsl_struct * info);
852static int mgsl_rxenable(struct mgsl_struct * info, int enable);
853static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
854static int mgsl_loopmode_send_done( struct mgsl_struct * info );
855
856/* set non-zero on successful registration with PCI subsystem */
857static int pci_registered;
858
859/*
860 * Global linked list of SyncLink devices
861 */
862static struct mgsl_struct *mgsl_device_list;
863static int mgsl_device_count;
864
865/*
866 * Set this param to non-zero to load eax with the
867 * .text section address and breakpoint on module load.
868 * This is useful for use with gdb and add-symbol-file command.
869 */
870static int break_on_load;
871
872/*
873 * Driver major number, defaults to zero to get auto
874 * assigned major number. May be forced as module parameter.
875 */
876static int ttymajor;
877
878/*
 879 * Array of user specified options for ISA adapters; see the example load below.
880 */
881static int io[MAX_ISA_DEVICES];
882static int irq[MAX_ISA_DEVICES];
883static int dma[MAX_ISA_DEVICES];
884static int debug_level;
885static int maxframe[MAX_TOTAL_DEVICES];
886static int dosyncppp[MAX_TOTAL_DEVICES];
887static int txdmabufs[MAX_TOTAL_DEVICES];
888static int txholdbufs[MAX_TOTAL_DEVICES];
889
890module_param(break_on_load, bool, 0);
891module_param(ttymajor, int, 0);
892module_param_array(io, int, NULL, 0);
893module_param_array(irq, int, NULL, 0);
894module_param_array(dma, int, NULL, 0);
895module_param(debug_level, int, 0);
896module_param_array(maxframe, int, NULL, 0);
897module_param_array(dosyncppp, int, NULL, 0);
898module_param_array(txdmabufs, int, NULL, 0);
899module_param_array(txholdbufs, int, NULL, 0);
900
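/*
 * Example module load (values are assumptions, shown only to illustrate the
 * parameters above): one ISA adapter at I/O address 0x280, IRQ 10, DMA
 * channel 7, with a 4096-byte maximum frame size and debug output enabled:
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096 debug_level=3
 */
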
901static char *driver_name = "SyncLink serial driver";
902static char *driver_version = "$Revision: 4.38 $";
903
904static int synclink_init_one (struct pci_dev *dev,
905 const struct pci_device_id *ent);
906static void synclink_remove_one (struct pci_dev *dev);
907
908static struct pci_device_id synclink_pci_tbl[] = {
909 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
910 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
911 { 0, }, /* terminate list */
912};
913MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
914
915MODULE_LICENSE("GPL");
916
917static struct pci_driver synclink_pci_driver = {
918 .name = "synclink",
919 .id_table = synclink_pci_tbl,
920 .probe = synclink_init_one,
921 .remove = __devexit_p(synclink_remove_one),
922};
923
924static struct tty_driver *serial_driver;
925
926/* number of characters left in xmit buffer before we ask for more */
927#define WAKEUP_CHARS 256
928
929
930static void mgsl_change_params(struct mgsl_struct *info);
931static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
932
933/*
934 * 1st function defined in .text section. Calling this function in
935 * init_module() followed by a breakpoint allows a remote debugger
936 * (gdb) to get the .text address for the add-symbol-file command.
 937 * This allows remote debugging of dynamically loadable modules; see the example below.
938 */
939static void* mgsl_get_text_ptr(void)
940{
941 return mgsl_get_text_ptr;
942}
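
/*
 * Example remote-debug session (a sketch; the module load address is an
 * assumption and must be taken from eax after the break_on_load breakpoint
 * fires on the target):
 *
 *	target$ modprobe synclink break_on_load=1
 *	(gdb) add-symbol-file synclink.ko 0xd0854060
 *	(gdb) continue
 */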
943
944static inline int mgsl_paranoia_check(struct mgsl_struct *info,
945 char *name, const char *routine)
946{
947#ifdef MGSL_PARANOIA_CHECK
948 static const char *badmagic =
949 "Warning: bad magic number for mgsl struct (%s) in %s\n";
950 static const char *badinfo =
951 "Warning: null mgsl_struct for (%s) in %s\n";
952
953 if (!info) {
954 printk(badinfo, name, routine);
955 return 1;
956 }
957 if (info->magic != MGSL_MAGIC) {
958 printk(badmagic, name, routine);
959 return 1;
960 }
961#else
962 if (!info)
963 return 1;
964#endif
965 return 0;
966}
967
968/**
969 * line discipline callback wrappers
970 *
971 * The wrappers maintain line discipline references
972 * while calling into the line discipline.
973 *
974 * ldisc_receive_buf - pass receive data to line discipline
975 */
976
977static void ldisc_receive_buf(struct tty_struct *tty,
978 const __u8 *data, char *flags, int count)
979{
980 struct tty_ldisc *ld;
981 if (!tty)
982 return;
983 ld = tty_ldisc_ref(tty);
984 if (ld) {
985 if (ld->receive_buf)
986 ld->receive_buf(tty, data, flags, count);
987 tty_ldisc_deref(ld);
988 }
989}
990
991/* mgsl_stop() throttle (stop) transmitter
992 *
993 * Arguments: tty pointer to tty info structure
994 * Return Value: None
995 */
996static void mgsl_stop(struct tty_struct *tty)
997{
998 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
999 unsigned long flags;
1000
1001 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1002 return;
1003
1004 if ( debug_level >= DEBUG_LEVEL_INFO )
1005 printk("mgsl_stop(%s)\n",info->device_name);
1006
1007 spin_lock_irqsave(&info->irq_spinlock,flags);
1008 if (info->tx_enabled)
1009 usc_stop_transmitter(info);
1010 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1011
1012} /* end of mgsl_stop() */
1013
1014/* mgsl_start() release (start) transmitter
1015 *
1016 * Arguments: tty pointer to tty info structure
1017 * Return Value: None
1018 */
1019static void mgsl_start(struct tty_struct *tty)
1020{
1021 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1022 unsigned long flags;
1023
1024 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1025 return;
1026
1027 if ( debug_level >= DEBUG_LEVEL_INFO )
1028 printk("mgsl_start(%s)\n",info->device_name);
1029
1030 spin_lock_irqsave(&info->irq_spinlock,flags);
1031 if (!info->tx_enabled)
1032 usc_start_transmitter(info);
1033 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1034
1035} /* end of mgsl_start() */
1036
1037/*
1038 * Bottom half work queue access functions
1039 */
1040
1041/* mgsl_bh_action() Return next bottom half action to perform.
1042 * Return Value: BH action code or 0 if nothing to do.
1043 */
1044static int mgsl_bh_action(struct mgsl_struct *info)
1045{
1046 unsigned long flags;
1047 int rc = 0;
1048
1049 spin_lock_irqsave(&info->irq_spinlock,flags);
1050
1051 if (info->pending_bh & BH_RECEIVE) {
1052 info->pending_bh &= ~BH_RECEIVE;
1053 rc = BH_RECEIVE;
1054 } else if (info->pending_bh & BH_TRANSMIT) {
1055 info->pending_bh &= ~BH_TRANSMIT;
1056 rc = BH_TRANSMIT;
1057 } else if (info->pending_bh & BH_STATUS) {
1058 info->pending_bh &= ~BH_STATUS;
1059 rc = BH_STATUS;
1060 }
1061
1062 if (!rc) {
1063 /* Mark BH routine as complete */
1064 info->bh_running = 0;
1065 info->bh_requested = 0;
1066 }
1067
1068 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1069
1070 return rc;
1071}
1072
1073/*
1074 * Perform bottom half processing of work items queued by ISR.
1075 */
1076static void mgsl_bh_handler(void* Context)
1077{
1078 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1079 int action;
1080
1081 if (!info)
1082 return;
1083
1084 if ( debug_level >= DEBUG_LEVEL_BH )
1085 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1086 __FILE__,__LINE__,info->device_name);
1087
1088 info->bh_running = 1;
1089
1090 while((action = mgsl_bh_action(info)) != 0) {
1091
1092 /* Process work item */
1093 if ( debug_level >= DEBUG_LEVEL_BH )
1094 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1095 __FILE__,__LINE__,action);
1096
1097 switch (action) {
1098
1099 case BH_RECEIVE:
1100 mgsl_bh_receive(info);
1101 break;
1102 case BH_TRANSMIT:
1103 mgsl_bh_transmit(info);
1104 break;
1105 case BH_STATUS:
1106 mgsl_bh_status(info);
1107 break;
1108 default:
1109 /* unknown work item ID */
1110 printk("Unknown work item ID=%08X!\n", action);
1111 break;
1112 }
1113 }
1114
1115 if ( debug_level >= DEBUG_LEVEL_BH )
1116 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1117 __FILE__,__LINE__,info->device_name);
1118}
1119
1120static void mgsl_bh_receive(struct mgsl_struct *info)
1121{
1122 int (*get_rx_frame)(struct mgsl_struct *info) =
1123 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1124
1125 if ( debug_level >= DEBUG_LEVEL_BH )
1126 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1127 __FILE__,__LINE__,info->device_name);
1128
1129 do
1130 {
1131 if (info->rx_rcc_underrun) {
1132 unsigned long flags;
1133 spin_lock_irqsave(&info->irq_spinlock,flags);
1134 usc_start_receiver(info);
1135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1136 return;
1137 }
1138 } while(get_rx_frame(info));
1139}
1140
1141static void mgsl_bh_transmit(struct mgsl_struct *info)
1142{
1143 struct tty_struct *tty = info->tty;
1144 unsigned long flags;
1145
1146 if ( debug_level >= DEBUG_LEVEL_BH )
1147 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1148 __FILE__,__LINE__,info->device_name);
1149
1150 if (tty) {
1151 tty_wakeup(tty);
1152 wake_up_interruptible(&tty->write_wait);
1153 }
1154
1155 /* if transmitter idle and loopmode_send_done_requested
1156 * then start echoing RxD to TxD
1157 */
1158 spin_lock_irqsave(&info->irq_spinlock,flags);
1159 if ( !info->tx_active && info->loopmode_send_done_requested )
1160 usc_loopmode_send_done( info );
1161 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1162}
1163
1164static void mgsl_bh_status(struct mgsl_struct *info)
1165{
1166 if ( debug_level >= DEBUG_LEVEL_BH )
1167 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1168 __FILE__,__LINE__,info->device_name);
1169
1170 info->ri_chkcount = 0;
1171 info->dsr_chkcount = 0;
1172 info->dcd_chkcount = 0;
1173 info->cts_chkcount = 0;
1174}
1175
1176/* mgsl_isr_receive_status()
1177 *
1178 * Service a receive status interrupt. The type of status
1179 * interrupt is indicated by the state of the RCSR.
1180 * This is only used for HDLC mode.
1181 *
1182 * Arguments: info pointer to device instance data
1183 * Return Value: None
1184 */
1185static void mgsl_isr_receive_status( struct mgsl_struct *info )
1186{
1187 u16 status = usc_InReg( info, RCSR );
1188
1189 if ( debug_level >= DEBUG_LEVEL_ISR )
1190 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1191 __FILE__,__LINE__,status);
1192
1193 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1194 info->loopmode_insert_requested &&
1195 usc_loopmode_active(info) )
1196 {
1197 ++info->icount.rxabort;
1198 info->loopmode_insert_requested = FALSE;
1199
1200 /* clear CMR:13 to start echoing RxD to TxD */
1201 info->cmr_value &= ~BIT13;
1202 usc_OutReg(info, CMR, info->cmr_value);
1203
1204 /* disable received abort irq (no longer required) */
1205 usc_OutReg(info, RICR,
1206 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1207 }
1208
1209 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1210 if (status & RXSTATUS_EXITED_HUNT)
1211 info->icount.exithunt++;
1212 if (status & RXSTATUS_IDLE_RECEIVED)
1213 info->icount.rxidle++;
1214 wake_up_interruptible(&info->event_wait_q);
1215 }
1216
1217 if (status & RXSTATUS_OVERRUN){
1218 info->icount.rxover++;
1219 usc_process_rxoverrun_sync( info );
1220 }
1221
1222 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1223 usc_UnlatchRxstatusBits( info, status );
1224
1225} /* end of mgsl_isr_receive_status() */
1226
1227/* mgsl_isr_transmit_status()
1228 *
1229 * Service a transmit status interrupt
1230 * HDLC mode :end of transmit frame
1231 * Async mode:all data is sent
1232 * transmit status is indicated by bits in the TCSR.
1233 *
1234 * Arguments: info pointer to device instance data
1235 * Return Value: None
1236 */
1237static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1238{
1239 u16 status = usc_InReg( info, TCSR );
1240
1241 if ( debug_level >= DEBUG_LEVEL_ISR )
1242 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1243 __FILE__,__LINE__,status);
1244
1245 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1246 usc_UnlatchTxstatusBits( info, status );
1247
1248 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1249 {
1250 /* finished sending HDLC abort. This may leave */
1251 /* the TxFifo with data from the aborted frame */
1252 /* so purge the TxFifo. Also shutdown the DMA */
1253 /* channel in case there is data remaining in */
1254 /* the DMA buffer */
1255 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1256 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1257 }
1258
1259 if ( status & TXSTATUS_EOF_SENT )
1260 info->icount.txok++;
1261 else if ( status & TXSTATUS_UNDERRUN )
1262 info->icount.txunder++;
1263 else if ( status & TXSTATUS_ABORT_SENT )
1264 info->icount.txabort++;
1265 else
1266 info->icount.txunder++;
1267
1268 info->tx_active = 0;
1269 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1270 del_timer(&info->tx_timer);
1271
1272 if ( info->drop_rts_on_tx_done ) {
1273 usc_get_serial_signals( info );
1274 if ( info->serial_signals & SerialSignal_RTS ) {
1275 info->serial_signals &= ~SerialSignal_RTS;
1276 usc_set_serial_signals( info );
1277 }
1278 info->drop_rts_on_tx_done = 0;
1279 }
1280
1281#ifdef CONFIG_HDLC
1282 if (info->netcount)
1283 hdlcdev_tx_done(info);
1284 else
1285#endif
1286 {
1287 if (info->tty->stopped || info->tty->hw_stopped) {
1288 usc_stop_transmitter(info);
1289 return;
1290 }
1291 info->pending_bh |= BH_TRANSMIT;
1292 }
1293
1294} /* end of mgsl_isr_transmit_status() */
1295
1296/* mgsl_isr_io_pin()
1297 *
1298 * Service an Input/Output pin interrupt. The type of
1299 * interrupt is indicated by bits in the MISR
1300 *
1301 * Arguments: info pointer to device instance data
1302 * Return Value: None
1303 */
1304static void mgsl_isr_io_pin( struct mgsl_struct *info )
1305{
1306 struct mgsl_icount *icount;
1307 u16 status = usc_InReg( info, MISR );
1308
1309 if ( debug_level >= DEBUG_LEVEL_ISR )
1310 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1311 __FILE__,__LINE__,status);
1312
1313 usc_ClearIrqPendingBits( info, IO_PIN );
1314 usc_UnlatchIostatusBits( info, status );
1315
1316 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1317 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1318 icount = &info->icount;
1319 /* update input line counters */
1320 if (status & MISCSTATUS_RI_LATCHED) {
1321 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1322 usc_DisablestatusIrqs(info,SICR_RI);
1323 icount->rng++;
1324 if ( status & MISCSTATUS_RI )
1325 info->input_signal_events.ri_up++;
1326 else
1327 info->input_signal_events.ri_down++;
1328 }
1329 if (status & MISCSTATUS_DSR_LATCHED) {
1330 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1331 usc_DisablestatusIrqs(info,SICR_DSR);
1332 icount->dsr++;
1333 if ( status & MISCSTATUS_DSR )
1334 info->input_signal_events.dsr_up++;
1335 else
1336 info->input_signal_events.dsr_down++;
1337 }
1338 if (status & MISCSTATUS_DCD_LATCHED) {
1339 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1340 usc_DisablestatusIrqs(info,SICR_DCD);
1341 icount->dcd++;
1342 if (status & MISCSTATUS_DCD) {
1343 info->input_signal_events.dcd_up++;
1344 } else
1345 info->input_signal_events.dcd_down++;
1346#ifdef CONFIG_HDLC
1347 if (info->netcount)
1348 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
1349#endif
1350 }
1351 if (status & MISCSTATUS_CTS_LATCHED)
1352 {
1353 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1354 usc_DisablestatusIrqs(info,SICR_CTS);
1355 icount->cts++;
1356 if ( status & MISCSTATUS_CTS )
1357 info->input_signal_events.cts_up++;
1358 else
1359 info->input_signal_events.cts_down++;
1360 }
1361 wake_up_interruptible(&info->status_event_wait_q);
1362 wake_up_interruptible(&info->event_wait_q);
1363
1364 if ( (info->flags & ASYNC_CHECK_CD) &&
1365 (status & MISCSTATUS_DCD_LATCHED) ) {
1366 if ( debug_level >= DEBUG_LEVEL_ISR )
1367 printk("%s CD now %s...", info->device_name,
1368 (status & MISCSTATUS_DCD) ? "on" : "off");
1369 if (status & MISCSTATUS_DCD)
1370 wake_up_interruptible(&info->open_wait);
1371 else {
1372 if ( debug_level >= DEBUG_LEVEL_ISR )
1373 printk("doing serial hangup...");
1374 if (info->tty)
1375 tty_hangup(info->tty);
1376 }
1377 }
1378
1379 if ( (info->flags & ASYNC_CTS_FLOW) &&
1380 (status & MISCSTATUS_CTS_LATCHED) ) {
1381 if (info->tty->hw_stopped) {
1382 if (status & MISCSTATUS_CTS) {
1383 if ( debug_level >= DEBUG_LEVEL_ISR )
1384 printk("CTS tx start...");
1385 if (info->tty)
1386 info->tty->hw_stopped = 0;
1387 usc_start_transmitter(info);
1388 info->pending_bh |= BH_TRANSMIT;
1389 return;
1390 }
1391 } else {
1392 if (!(status & MISCSTATUS_CTS)) {
1393 if ( debug_level >= DEBUG_LEVEL_ISR )
1394 printk("CTS tx stop...");
1395 if (info->tty)
1396 info->tty->hw_stopped = 1;
1397 usc_stop_transmitter(info);
1398 }
1399 }
1400 }
1401 }
1402
1403 info->pending_bh |= BH_STATUS;
1404
1405 /* for diagnostics set IRQ flag */
1406 if ( status & MISCSTATUS_TXC_LATCHED ){
1407 usc_OutReg( info, SICR,
1408 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1409 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1410 info->irq_occurred = 1;
1411 }
1412
1413} /* end of mgsl_isr_io_pin() */
1414
1415/* mgsl_isr_transmit_data()
1416 *
1417 * Service a transmit data interrupt (async mode only).
1418 *
1419 * Arguments: info pointer to device instance data
1420 * Return Value: None
1421 */
1422static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1423{
1424 if ( debug_level >= DEBUG_LEVEL_ISR )
1425 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1426 __FILE__,__LINE__,info->xmit_cnt);
1427
1428 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1429
1430 if (info->tty->stopped || info->tty->hw_stopped) {
1431 usc_stop_transmitter(info);
1432 return;
1433 }
1434
1435 if ( info->xmit_cnt )
1436 usc_load_txfifo( info );
1437 else
1438 info->tx_active = 0;
1439
1440 if (info->xmit_cnt < WAKEUP_CHARS)
1441 info->pending_bh |= BH_TRANSMIT;
1442
1443} /* end of mgsl_isr_transmit_data() */
1444
1445/* mgsl_isr_receive_data()
1446 *
1447 * Service a receive data interrupt. This occurs
1448 * when operating in asynchronous interrupt transfer mode.
1449 * The receive data FIFO is flushed to the receive data buffers.
1450 *
1451 * Arguments: info pointer to device instance data
1452 * Return Value: None
1453 */
1454static void mgsl_isr_receive_data( struct mgsl_struct *info )
1455{
1456 int Fifocount;
1457 u16 status;
1458	int work = 0;
1459	unsigned char DataByte;
1460 struct tty_struct *tty = info->tty;
1461 struct mgsl_icount *icount = &info->icount;
1462
1463 if ( debug_level >= DEBUG_LEVEL_ISR )
1464 printk("%s(%d):mgsl_isr_receive_data\n",
1465 __FILE__,__LINE__);
1466
1467 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1468
1469 /* select FIFO status for RICR readback */
1470 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1471
1472 /* clear the Wordstatus bit so that status readback */
1473 /* only reflects the status of this byte */
1474 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1475
1476 /* flush the receive FIFO */
1477
1478 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1479		int flag;
1480
1481		/* read one byte from RxFIFO */
1482 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1483 info->io_base + CCAR );
1484 DataByte = inb( info->io_base + CCAR );
1485
1486 /* get the status of the received byte */
1487 status = usc_InReg(info, RCSR);
1488 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1489 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1490 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1491
1492		icount->rx++;
1493
1494		flag = 0;
1495		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1496 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1497 printk("rxerr=%04X\n",status);
1498 /* update error statistics */
1499 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1500 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1501 icount->brk++;
1502 } else if (status & RXSTATUS_PARITY_ERROR)
1503 icount->parity++;
1504 else if (status & RXSTATUS_FRAMING_ERROR)
1505 icount->frame++;
1506 else if (status & RXSTATUS_OVERRUN) {
1507 /* must issue purge fifo cmd before */
1508 /* 16C32 accepts more receive chars */
1509 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1510 icount->overrun++;
1511 }
1512
1513 /* discard char if tty control flags say so */
1514 if (status & info->ignore_status_mask)
1515 continue;
1516
1517 status &= info->read_status_mask;
1518
1519 if (status & RXSTATUS_BREAK_RECEIVED) {
1520				flag = TTY_BREAK;
1521				if (info->flags & ASYNC_SAK)
1522 do_SAK(tty);
1523 } else if (status & RXSTATUS_PARITY_ERROR)
1524				flag = TTY_PARITY;
1525			else if (status & RXSTATUS_FRAMING_ERROR)
1526				flag = TTY_FRAME;
1527		}	/* end of if (error) */
1528		tty_insert_flip_char(tty, DataByte, flag);
1529 if (status & RXSTATUS_OVERRUN) {
1530 /* Overrun is special, since it's
1531 * reported immediately, and doesn't
1532 * affect the current character
1533 */
1534 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1535 }
1536		}
1537
1538 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1539		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1540 __FILE__,__LINE__,icount->rx,icount->brk,
1541 icount->parity,icount->frame,icount->overrun);
1542 }
1543
1544	if(work)
1545		tty_flip_buffer_push(tty);
1546}
1547
1548/* mgsl_isr_misc()
1549 *
 1550 * Service a miscellaneous interrupt source.
1551 *
1552 * Arguments: info pointer to device extension (instance data)
1553 * Return Value: None
1554 */
1555static void mgsl_isr_misc( struct mgsl_struct *info )
1556{
1557 u16 status = usc_InReg( info, MISR );
1558
1559 if ( debug_level >= DEBUG_LEVEL_ISR )
1560 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1561 __FILE__,__LINE__,status);
1562
1563 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1564 (info->params.mode == MGSL_MODE_HDLC)) {
1565
1566 /* turn off receiver and rx DMA */
1567 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1568 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1569 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1570 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1571 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1572
1573 /* schedule BH handler to restart receiver */
1574 info->pending_bh |= BH_RECEIVE;
1575 info->rx_rcc_underrun = 1;
1576 }
1577
1578 usc_ClearIrqPendingBits( info, MISC );
1579 usc_UnlatchMiscstatusBits( info, status );
1580
1581} /* end of mgsl_isr_misc() */
1582
1583/* mgsl_isr_null()
1584 *
1585 * Services undefined interrupt vectors from the
1586 * USC. (hence this function SHOULD never be called)
1587 *
1588 * Arguments: info pointer to device extension (instance data)
1589 * Return Value: None
1590 */
1591static void mgsl_isr_null( struct mgsl_struct *info )
1592{
1593
1594} /* end of mgsl_isr_null() */
1595
1596/* mgsl_isr_receive_dma()
1597 *
1598 * Service a receive DMA channel interrupt.
1599 * For this driver there are two sources of receive DMA interrupts
1600 * as identified in the Receive DMA mode Register (RDMR):
1601 *
1602 * BIT3 EOA/EOL End of List, all receive buffers in receive
1603 * buffer list have been filled (no more free buffers
1604 * available). The DMA controller has shut down.
1605 *
1606 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1607 * DMA buffer is terminated in response to completion
1608 * of a good frame or a frame with errors. The status
1609 * of the frame is stored in the buffer entry in the
1610 * list of receive buffer entries.
1611 *
1612 * Arguments: info pointer to device instance data
1613 * Return Value: None
1614 */
1615static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1616{
1617 u16 status;
1618
1619 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1620 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1621
1622 /* Read the receive DMA status to identify interrupt type. */
1623 /* This also clears the status bits. */
1624 status = usc_InDmaReg( info, RDMR );
1625
1626 if ( debug_level >= DEBUG_LEVEL_ISR )
1627 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1628 __FILE__,__LINE__,info->device_name,status);
1629
1630 info->pending_bh |= BH_RECEIVE;
1631
1632 if ( status & BIT3 ) {
1633 info->rx_overflow = 1;
1634 info->icount.buf_overrun++;
1635 }
1636
1637} /* end of mgsl_isr_receive_dma() */
1638
1639/* mgsl_isr_transmit_dma()
1640 *
1641 * This function services a transmit DMA channel interrupt.
1642 *
1643 * For this driver there is one source of transmit DMA interrupts
1644 * as identified in the Transmit DMA Mode Register (TDMR):
1645 *
1646 * BIT2 EOB End of Buffer. This interrupt occurs when a
1647 * transmit DMA buffer has been emptied.
1648 *
1649 * The driver maintains enough transmit DMA buffers to hold at least
1650 * one max frame size transmit frame. When operating in a buffered
1651 * transmit mode, there may be enough transmit DMA buffers to hold at
1652 * least two or more max frame size frames. On an EOB condition,
1653 * determine if there are any queued transmit buffers and copy into
1654 * transmit DMA buffers if we have room.
1655 *
1656 * Arguments: info pointer to device instance data
1657 * Return Value: None
1658 */
1659static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1660{
1661 u16 status;
1662
1663 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1664 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1665
1666 /* Read the transmit DMA status to identify interrupt type. */
1667 /* This also clears the status bits. */
1668
1669 status = usc_InDmaReg( info, TDMR );
1670
1671 if ( debug_level >= DEBUG_LEVEL_ISR )
1672 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1673 __FILE__,__LINE__,info->device_name,status);
1674
1675 if ( status & BIT2 ) {
1676 --info->tx_dma_buffers_used;
1677
1678 /* if there are transmit frames queued,
1679 * try to load the next one
1680 */
1681 if ( load_next_tx_holding_buffer(info) ) {
1682 /* if call returns non-zero value, we have
1683 * at least one free tx holding buffer
1684 */
1685 info->pending_bh |= BH_TRANSMIT;
1686 }
1687 }
1688
1689} /* end of mgsl_isr_transmit_dma() */
1690
1691/* mgsl_interrupt()
1692 *
1693 * Interrupt service routine entry point.
1694 *
1695 * Arguments:
1696 *
1697 * irq interrupt number that caused interrupt
1698 * dev_id device ID supplied during interrupt registration
1699 * regs interrupted processor context
1700 *
1701 * Return Value: None
1702 */
1703static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1704{
1705 struct mgsl_struct * info;
1706 u16 UscVector;
1707 u16 DmaVector;
1708
1709 if ( debug_level >= DEBUG_LEVEL_ISR )
1710 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1711 __FILE__,__LINE__,irq);
1712
1713 info = (struct mgsl_struct *)dev_id;
1714 if (!info)
1715 return IRQ_NONE;
1716
1717 spin_lock(&info->irq_spinlock);
1718
1719 for(;;) {
1720 /* Read the interrupt vectors from hardware. */
1721 UscVector = usc_InReg(info, IVR) >> 9;
1722 DmaVector = usc_InDmaReg(info, DIVR);
1723
1724 if ( debug_level >= DEBUG_LEVEL_ISR )
1725 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1726 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1727
1728 if ( !UscVector && !DmaVector )
1729 break;
1730
1731 /* Dispatch interrupt vector */
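		/* A nonzero USC vector indexes the UscIsrTable dispatch table;
		 * otherwise DIVR bits 10:9 distinguish transmit DMA service
		 * (binary 10) from receive DMA service. */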
1732 if ( UscVector )
1733 (*UscIsrTable[UscVector])(info);
1734 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1735 mgsl_isr_transmit_dma(info);
1736 else
1737 mgsl_isr_receive_dma(info);
1738
1739 if ( info->isr_overflow ) {
1740 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1741 __FILE__,__LINE__,info->device_name, irq);
1742 usc_DisableMasterIrqBit(info);
1743 usc_DisableDmaInterrupts(info,DICR_MASTER);
1744 break;
1745 }
1746 }
1747
1748 /* Request bottom half processing if there's something
1749 * for it to do and the bh is not already running
1750 */
1751
1752 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1753 if ( debug_level >= DEBUG_LEVEL_ISR )
1754 printk("%s(%d):%s queueing bh task.\n",
1755 __FILE__,__LINE__,info->device_name);
1756 schedule_work(&info->task);
1757 info->bh_requested = 1;
1758 }
1759
1760 spin_unlock(&info->irq_spinlock);
1761
1762 if ( debug_level >= DEBUG_LEVEL_ISR )
1763 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1764 __FILE__,__LINE__,irq);
1765 return IRQ_HANDLED;
1766} /* end of mgsl_interrupt() */
1767
1768/* startup()
1769 *
1770 * Initialize and start device.
1771 *
1772 * Arguments: info pointer to device instance data
1773 * Return Value: 0 if success, otherwise error code
1774 */
1775static int startup(struct mgsl_struct * info)
1776{
1777 int retval = 0;
1778
1779 if ( debug_level >= DEBUG_LEVEL_INFO )
1780 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1781
1782 if (info->flags & ASYNC_INITIALIZED)
1783 return 0;
1784
1785 if (!info->xmit_buf) {
1786 /* allocate a page of memory for a transmit buffer */
1787 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1788 if (!info->xmit_buf) {
1789 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1790 __FILE__,__LINE__,info->device_name);
1791 return -ENOMEM;
1792 }
1793 }
1794
1795 info->pending_bh = 0;
1796
1797	memset(&info->icount, 0, sizeof(info->icount));
1798
1799	init_timer(&info->tx_timer);
1800 info->tx_timer.data = (unsigned long)info;
1801 info->tx_timer.function = mgsl_tx_timeout;
1802
1803 /* Allocate and claim adapter resources */
1804 retval = mgsl_claim_resources(info);
1805
1806 /* perform existence check and diagnostics */
1807 if ( !retval )
1808 retval = mgsl_adapter_test(info);
1809
1810 if ( retval ) {
1811 if (capable(CAP_SYS_ADMIN) && info->tty)
1812 set_bit(TTY_IO_ERROR, &info->tty->flags);
1813 mgsl_release_resources(info);
1814 return retval;
1815 }
1816
1817 /* program hardware for current parameters */
1818 mgsl_change_params(info);
1819
1820 if (info->tty)
1821 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1822
1823 info->flags |= ASYNC_INITIALIZED;
1824
1825 return 0;
1826
1827} /* end of startup() */
1828
1829/* shutdown()
1830 *
1831 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1832 *
1833 * Arguments: info pointer to device instance data
1834 * Return Value: None
1835 */
1836static void shutdown(struct mgsl_struct * info)
1837{
1838 unsigned long flags;
1839
1840 if (!(info->flags & ASYNC_INITIALIZED))
1841 return;
1842
1843 if (debug_level >= DEBUG_LEVEL_INFO)
1844 printk("%s(%d):mgsl_shutdown(%s)\n",
1845 __FILE__,__LINE__, info->device_name );
1846
1847 /* clear status wait queue because status changes */
1848 /* can't happen after shutting down the hardware */
1849 wake_up_interruptible(&info->status_event_wait_q);
1850 wake_up_interruptible(&info->event_wait_q);
1851
1852 del_timer(&info->tx_timer);
1853
1854 if (info->xmit_buf) {
1855 free_page((unsigned long) info->xmit_buf);
1856 info->xmit_buf = NULL;
1857 }
1858
1859 spin_lock_irqsave(&info->irq_spinlock,flags);
1860 usc_DisableMasterIrqBit(info);
1861 usc_stop_receiver(info);
1862 usc_stop_transmitter(info);
1863 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1864 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1865 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1866
1867 /* Disable DMAEN (Port 7, Bit 14) */
1868 /* This disconnects the DMA request signal from the ISA bus */
1869 /* on the ISA adapter. This has no effect for the PCI adapter */
1870 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1871
1872 /* Disable INTEN (Port 6, Bit12) */
1873 /* This disconnects the IRQ request signal to the ISA bus */
1874 /* on the ISA adapter. This has no effect for the PCI adapter */
1875 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1876
1877 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1878 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1879 usc_set_serial_signals(info);
1880 }
1881
1882 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1883
1884 mgsl_release_resources(info);
1885
1886 if (info->tty)
1887 set_bit(TTY_IO_ERROR, &info->tty->flags);
1888
1889 info->flags &= ~ASYNC_INITIALIZED;
1890
1891} /* end of shutdown() */
1892
1893static void mgsl_program_hw(struct mgsl_struct *info)
1894{
1895 unsigned long flags;
1896
1897 spin_lock_irqsave(&info->irq_spinlock,flags);
1898
1899 usc_stop_receiver(info);
1900 usc_stop_transmitter(info);
1901 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1902
1903 if (info->params.mode == MGSL_MODE_HDLC ||
1904 info->params.mode == MGSL_MODE_RAW ||
1905 info->netcount)
1906 usc_set_sync_mode(info);
1907 else
1908 usc_set_async_mode(info);
1909
1910 usc_set_serial_signals(info);
1911
1912 info->dcd_chkcount = 0;
1913 info->cts_chkcount = 0;
1914 info->ri_chkcount = 0;
1915 info->dsr_chkcount = 0;
1916
1917 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1918 usc_EnableInterrupts(info, IO_PIN);
1919 usc_get_serial_signals(info);
1920
1921 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1922 usc_start_receiver(info);
1923
1924 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1925}
1926
1927/* Reconfigure adapter based on new parameters
1928 */
1929static void mgsl_change_params(struct mgsl_struct *info)
1930{
1931 unsigned cflag;
1932 int bits_per_char;
1933
1934 if (!info->tty || !info->tty->termios)
1935 return;
1936
1937 if (debug_level >= DEBUG_LEVEL_INFO)
1938 printk("%s(%d):mgsl_change_params(%s)\n",
1939 __FILE__,__LINE__, info->device_name );
1940
1941 cflag = info->tty->termios->c_cflag;
1942
1943 /* if B0 rate (hangup) specified then negate DTR and RTS */
1944 /* otherwise assert DTR and RTS */
1945 if (cflag & CBAUD)
1946 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1947 else
1948 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1949
1950 /* byte size and parity */
1951
1952 switch (cflag & CSIZE) {
1953 case CS5: info->params.data_bits = 5; break;
1954 case CS6: info->params.data_bits = 6; break;
1955 case CS7: info->params.data_bits = 7; break;
1956 case CS8: info->params.data_bits = 8; break;
1957 /* Never happens, but GCC is too dumb to figure it out */
1958 default: info->params.data_bits = 7; break;
1959 }
1960
1961 if (cflag & CSTOPB)
1962 info->params.stop_bits = 2;
1963 else
1964 info->params.stop_bits = 1;
1965
1966 info->params.parity = ASYNC_PARITY_NONE;
1967 if (cflag & PARENB) {
1968 if (cflag & PARODD)
1969 info->params.parity = ASYNC_PARITY_ODD;
1970 else
1971 info->params.parity = ASYNC_PARITY_EVEN;
1972#ifdef CMSPAR
1973 if (cflag & CMSPAR)
1974 info->params.parity = ASYNC_PARITY_SPACE;
1975#endif
1976 }
1977
1978 /* calculate number of jiffies to transmit a full
1979 * FIFO (32 bytes) at specified data rate
1980 */
1981 bits_per_char = info->params.data_bits +
1982 info->params.stop_bits + 1;
1983
1984 /* if port data rate is set to 460800 or less then
1985 * allow tty settings to override, otherwise keep the
1986 * current data rate.
1987 */
1988 if (info->params.data_rate <= 460800)
1989 info->params.data_rate = tty_get_baud_rate(info->tty);
1990
1991 if ( info->params.data_rate ) {
1992 info->timeout = (32*HZ*bits_per_char) /
1993 info->params.data_rate;
1994 }
1995 info->timeout += HZ/50; /* Add .02 seconds of slop */
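	/* Illustrative example: at 9600 bps with 8 data bits and 1 stop bit,
	 * bits_per_char is 10, so 32*HZ*10/9600 is roughly HZ/30 jiffies
	 * (about 33 ms) before the slop is added. */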
1996
1997 if (cflag & CRTSCTS)
1998 info->flags |= ASYNC_CTS_FLOW;
1999 else
2000 info->flags &= ~ASYNC_CTS_FLOW;
2001
2002 if (cflag & CLOCAL)
2003 info->flags &= ~ASYNC_CHECK_CD;
2004 else
2005 info->flags |= ASYNC_CHECK_CD;
2006
2007 /* process tty input control flags */
2008
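	/* read_status_mask selects which receive status conditions are
	 * reported; ignore_status_mask selects conditions whose data is
	 * silently discarded */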
2009 info->read_status_mask = RXSTATUS_OVERRUN;
2010 if (I_INPCK(info->tty))
2011 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2012 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2013 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2014
2015 if (I_IGNPAR(info->tty))
2016 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2017 if (I_IGNBRK(info->tty)) {
2018 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2019 /* If ignoring parity and break indicators, ignore
2020 * overruns too. (For real raw support).
2021 */
2022 if (I_IGNPAR(info->tty))
2023 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2024 }
2025
2026 mgsl_program_hw(info);
2027
2028} /* end of mgsl_change_params() */
2029
2030/* mgsl_put_char()
2031 *
2032 * Add a character to the transmit buffer.
2033 *
2034 * Arguments: tty pointer to tty information structure
2035 * ch character to add to transmit buffer
2036 *
2037 * Return Value: None
2038 */
2039static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2040{
2041 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2042 unsigned long flags;
2043
2044 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2045 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2046 __FILE__,__LINE__,ch,info->device_name);
2047 }
2048
2049 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2050 return;
2051
2052 if (!tty || !info->xmit_buf)
2053 return;
2054
2055 spin_lock_irqsave(&info->irq_spinlock,flags);
2056
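	/* In async mode characters are always queued; in sync (HDLC/raw)
	 * modes they are only buffered while the transmitter is idle,
	 * accumulating as the next frame to be sent */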
2057 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2058
2059 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2060 info->xmit_buf[info->xmit_head++] = ch;
2061 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2062 info->xmit_cnt++;
2063 }
2064 }
2065
2066 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2067
2068} /* end of mgsl_put_char() */
2069
2070/* mgsl_flush_chars()
2071 *
2072 * Enable transmitter so remaining characters in the
2073 * transmit buffer are sent.
2074 *
2075 * Arguments: tty pointer to tty information structure
2076 * Return Value: None
2077 */
2078static void mgsl_flush_chars(struct tty_struct *tty)
2079{
2080 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2081 unsigned long flags;
2082
2083 if ( debug_level >= DEBUG_LEVEL_INFO )
2084 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2085 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2086
2087 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2088 return;
2089
2090 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2091 !info->xmit_buf)
2092 return;
2093
2094 if ( debug_level >= DEBUG_LEVEL_INFO )
2095 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2096 __FILE__,__LINE__,info->device_name );
2097
2098 spin_lock_irqsave(&info->irq_spinlock,flags);
2099
2100 if (!info->tx_active) {
2101 if ( (info->params.mode == MGSL_MODE_HDLC ||
2102 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2103 /* operating in synchronous (frame oriented) mode */
2104 /* copy data from circular xmit_buf to */
2105 /* transmit DMA buffer. */
2106 mgsl_load_tx_dma_buffer(info,
2107 info->xmit_buf,info->xmit_cnt);
2108 }
2109 usc_start_transmitter(info);
2110 }
2111
2112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2113
2114} /* end of mgsl_flush_chars() */
2115
2116/* mgsl_write()
2117 *
2118 * Send a block of data
2119 *
2120 * Arguments:
2121 *
2122 * tty pointer to tty information structure
2123 * buf pointer to buffer containing send data
2124 * count size of send data in bytes
2125 *
2126 * Return Value: number of characters written
2127 */
2128static int mgsl_write(struct tty_struct * tty,
2129 const unsigned char *buf, int count)
2130{
2131 int c, ret = 0;
2132 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2133 unsigned long flags;
2134
2135 if ( debug_level >= DEBUG_LEVEL_INFO )
2136 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2137 __FILE__,__LINE__,info->device_name,count);
2138
2139 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2140 goto cleanup;
2141
2142	if (!tty || !info->xmit_buf)
2143		goto cleanup;
2144
2145 if ( info->params.mode == MGSL_MODE_HDLC ||
2146 info->params.mode == MGSL_MODE_RAW ) {
2147		/* operating in synchronous (frame oriented) mode */
2149 if (info->tx_active) {
2150
2151 if ( info->params.mode == MGSL_MODE_HDLC ) {
2152 ret = 0;
2153 goto cleanup;
2154 }
2155 /* transmitter is actively sending data -
2156 * if we have multiple transmit dma and
2157 * holding buffers, attempt to queue this
2158 * frame for transmission at a later time.
2159 */
2160 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2161 /* no tx holding buffers available */
2162 ret = 0;
2163 goto cleanup;
2164 }
2165
2166 /* queue transmit frame request */
2167 ret = count;
2168 save_tx_buffer_request(info,buf,count);
2169
2170 /* if we have sufficient tx dma buffers,
2171 * load the next buffered tx request
2172 */
2173 spin_lock_irqsave(&info->irq_spinlock,flags);
2174 load_next_tx_holding_buffer(info);
2175 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2176 goto cleanup;
2177 }
2178
2179 /* if operating in HDLC LoopMode and the adapter */
2180 /* has yet to be inserted into the loop, we can't */
2181 /* transmit */
2182
2183 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2184 !usc_loopmode_active(info) )
2185 {
2186 ret = 0;
2187 goto cleanup;
2188 }
2189
2190 if ( info->xmit_cnt ) {
2191			/* Send data accumulated from mgsl_put_char() calls */
2192			/* as a frame and wait before accepting more data. */
2193 ret = 0;
2194
2195 /* copy data from circular xmit_buf to */
2196 /* transmit DMA buffer. */
2197 mgsl_load_tx_dma_buffer(info,
2198 info->xmit_buf,info->xmit_cnt);
2199 if ( debug_level >= DEBUG_LEVEL_INFO )
2200 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2201 __FILE__,__LINE__,info->device_name);
2202 } else {
2203 if ( debug_level >= DEBUG_LEVEL_INFO )
2204 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2205 __FILE__,__LINE__,info->device_name);
2206 ret = count;
2207 info->xmit_cnt = count;
2208 mgsl_load_tx_dma_buffer(info,buf,count);
2209 }
2210 } else {
2211 while (1) {
2212 spin_lock_irqsave(&info->irq_spinlock,flags);
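			/* limit this pass to the smallest of: bytes requested,
			 * free space (keeping one byte open so the buffer is
			 * never completely full), and the contiguous run up
			 * to the end of the circular buffer */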
2213 c = min_t(int, count,
2214 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2215 SERIAL_XMIT_SIZE - info->xmit_head));
2216 if (c <= 0) {
2217 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2218 break;
2219 }
2220 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2221 info->xmit_head = ((info->xmit_head + c) &
2222 (SERIAL_XMIT_SIZE-1));
2223 info->xmit_cnt += c;
2224 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2225 buf += c;
2226 count -= c;
2227 ret += c;
2228 }
2229 }
2230
2231 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2232 spin_lock_irqsave(&info->irq_spinlock,flags);
2233 if (!info->tx_active)
2234 usc_start_transmitter(info);
2235 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2236 }
2237cleanup:
2238 if ( debug_level >= DEBUG_LEVEL_INFO )
2239 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2240 __FILE__,__LINE__,info->device_name,ret);
2241
2242 return ret;
2243
2244} /* end of mgsl_write() */
2245
2246/* mgsl_write_room()
2247 *
2248 * Return the count of free bytes in transmit buffer
2249 *
2250 * Arguments: tty pointer to tty info structure
2251 * Return Value: count of free bytes in transmit buffer
2252 */
2253static int mgsl_write_room(struct tty_struct *tty)
2254{
2255 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2256 int ret;
2257
2258 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2259 return 0;
2260 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2261 if (ret < 0)
2262 ret = 0;
2263
2264 if (debug_level >= DEBUG_LEVEL_INFO)
2265 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2266 __FILE__,__LINE__, info->device_name,ret );
2267
2268 if ( info->params.mode == MGSL_MODE_HDLC ||
2269 info->params.mode == MGSL_MODE_RAW ) {
2270 /* operating in synchronous (frame oriented) mode */
2271 if ( info->tx_active )
2272 return 0;
2273 else
2274 return HDLC_MAX_FRAME_SIZE;
2275 }
2276
2277 return ret;
2278
2279} /* end of mgsl_write_room() */
2280
2281/* mgsl_chars_in_buffer()
2282 *
2283 * Return the count of bytes in transmit buffer
2284 *
2285 * Arguments: tty pointer to tty info structure
2286 * Return Value: count of bytes in transmit buffer
2287 */
2288static int mgsl_chars_in_buffer(struct tty_struct *tty)
2289{
2290 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2291
2292 if (debug_level >= DEBUG_LEVEL_INFO)
2293 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2294 __FILE__,__LINE__, info->device_name );
2295
2296 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2297 return 0;
2298
2299 if (debug_level >= DEBUG_LEVEL_INFO)
2300 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2301 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2302
2303 if ( info->params.mode == MGSL_MODE_HDLC ||
2304 info->params.mode == MGSL_MODE_RAW ) {
2305 /* operating in synchronous (frame oriented) mode */
2306 if ( info->tx_active )
2307 return info->max_frame_size;
2308 else
2309 return 0;
2310 }
2311
2312 return info->xmit_cnt;
2313} /* end of mgsl_chars_in_buffer() */
2314
2315/* mgsl_flush_buffer()
2316 *
2317 * Discard all data in the send buffer
2318 *
2319 * Arguments: tty pointer to tty info structure
2320 * Return Value: None
2321 */
2322static void mgsl_flush_buffer(struct tty_struct *tty)
2323{
2324 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2325 unsigned long flags;
2326
2327 if (debug_level >= DEBUG_LEVEL_INFO)
2328 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2329 __FILE__,__LINE__, info->device_name );
2330
2331 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2332 return;
2333
2334 spin_lock_irqsave(&info->irq_spinlock,flags);
2335 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2336 del_timer(&info->tx_timer);
2337 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2338
2339 wake_up_interruptible(&tty->write_wait);
2340 tty_wakeup(tty);
2341}
2342
2343/* mgsl_send_xchar()
2344 *
2345 * Send a high-priority XON/XOFF character
2346 *
2347 * Arguments: tty pointer to tty info structure
2348 * ch character to send
2349 * Return Value: None
2350 */
2351static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2352{
2353 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2354 unsigned long flags;
2355
2356 if (debug_level >= DEBUG_LEVEL_INFO)
2357 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2358 __FILE__,__LINE__, info->device_name, ch );
2359
2360 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2361 return;
2362
2363 info->x_char = ch;
2364 if (ch) {
2365 /* Make sure transmit interrupts are on */
2366 spin_lock_irqsave(&info->irq_spinlock,flags);
2367 if (!info->tx_enabled)
2368 usc_start_transmitter(info);
2369 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2370 }
2371} /* end of mgsl_send_xchar() */
2372
2373/* mgsl_throttle()
2374 *
2375 * Signal remote device to throttle send data (our receive data)
2376 *
2377 * Arguments: tty pointer to tty info structure
2378 * Return Value: None
2379 */
2380static void mgsl_throttle(struct tty_struct * tty)
2381{
2382 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2383 unsigned long flags;
2384
2385 if (debug_level >= DEBUG_LEVEL_INFO)
2386 printk("%s(%d):mgsl_throttle(%s) entry\n",
2387 __FILE__,__LINE__, info->device_name );
2388
2389 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2390 return;
2391
2392 if (I_IXOFF(tty))
2393 mgsl_send_xchar(tty, STOP_CHAR(tty));
2394
2395 if (tty->termios->c_cflag & CRTSCTS) {
2396 spin_lock_irqsave(&info->irq_spinlock,flags);
2397 info->serial_signals &= ~SerialSignal_RTS;
2398 usc_set_serial_signals(info);
2399 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2400 }
2401} /* end of mgsl_throttle() */
2402
2403/* mgsl_unthrottle()
2404 *
2405 * Signal remote device to stop throttling send data (our receive data)
2406 *
2407 * Arguments: tty pointer to tty info structure
2408 * Return Value: None
2409 */
2410static void mgsl_unthrottle(struct tty_struct * tty)
2411{
2412 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2413 unsigned long flags;
2414
2415 if (debug_level >= DEBUG_LEVEL_INFO)
2416 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2417 __FILE__,__LINE__, info->device_name );
2418
2419 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2420 return;
2421
2422 if (I_IXOFF(tty)) {
2423 if (info->x_char)
2424 info->x_char = 0;
2425 else
2426 mgsl_send_xchar(tty, START_CHAR(tty));
2427 }
2428
2429 if (tty->termios->c_cflag & CRTSCTS) {
2430 spin_lock_irqsave(&info->irq_spinlock,flags);
2431 info->serial_signals |= SerialSignal_RTS;
2432 usc_set_serial_signals(info);
2433 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2434 }
2435
2436} /* end of mgsl_unthrottle() */
2437
2438/* mgsl_get_stats()
2439 *
2440 * get the current device statistics
2441 *
2442 * Arguments: info pointer to device instance data
2443 * user_icount pointer to buffer to hold returned stats
2444 *
2445 * Return Value: 0 if success, otherwise error code
2446 */
2447static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2448{
2449 int err;
2450
2451 if (debug_level >= DEBUG_LEVEL_INFO)
2452		printk("%s(%d):mgsl_get_stats(%s)\n",
2453 __FILE__,__LINE__, info->device_name);
2454
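	/* a NULL user buffer clears the counters instead of copying them out */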
2455	if (!user_icount) {
2456 memset(&info->icount, 0, sizeof(info->icount));
2457 } else {
2458 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2459 if (err)
2460 return -EFAULT;
2461	}
2462
2463 return 0;
2464
2465} /* end of mgsl_get_stats() */
2466
2467/* mgsl_get_params()
2468 *
2469 * get the current serial parameters information
2470 *
2471 * Arguments: info pointer to device instance data
2472 * user_params pointer to buffer to hold returned params
2473 *
2474 * Return Value: 0 if success, otherwise error code
2475 */
2476static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2477{
2478 int err;
2479 if (debug_level >= DEBUG_LEVEL_INFO)
2480 printk("%s(%d):mgsl_get_params(%s)\n",
2481 __FILE__,__LINE__, info->device_name);
2482
2483 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2484 if (err) {
2485 if ( debug_level >= DEBUG_LEVEL_INFO )
2486 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2487 __FILE__,__LINE__,info->device_name);
2488 return -EFAULT;
2489 }
2490
2491 return 0;
2492
2493} /* end of mgsl_get_params() */
2494
2495/* mgsl_set_params()
2496 *
2497 * set the serial parameters
2498 *
2499 * Arguments:
2500 *
2501 * info pointer to device instance data
2502 * new_params user buffer containing new serial params
2503 *
2504 * Return Value: 0 if success, otherwise error code
2505 */
2506static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2507{
2508 unsigned long flags;
2509 MGSL_PARAMS tmp_params;
2510 int err;
2511
2512 if (debug_level >= DEBUG_LEVEL_INFO)
2513 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2514 info->device_name );
2515 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2516 if (err) {
2517 if ( debug_level >= DEBUG_LEVEL_INFO )
2518 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2519 __FILE__,__LINE__,info->device_name);
2520 return -EFAULT;
2521 }
2522
2523 spin_lock_irqsave(&info->irq_spinlock,flags);
2524 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2525 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2526
2527 mgsl_change_params(info);
2528
2529 return 0;
2530
2531} /* end of mgsl_set_params() */
2532
2533/* mgsl_get_txidle()
2534 *
2535 * get the current transmit idle mode
2536 *
2537 * Arguments: info pointer to device instance data
2538 * idle_mode pointer to buffer to hold returned idle mode
2539 *
2540 * Return Value: 0 if success, otherwise error code
2541 */
2542static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2543{
2544 int err;
2545
2546 if (debug_level >= DEBUG_LEVEL_INFO)
2547 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2548 __FILE__,__LINE__, info->device_name, info->idle_mode);
2549
2550 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2551 if (err) {
2552 if ( debug_level >= DEBUG_LEVEL_INFO )
2553 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2554 __FILE__,__LINE__,info->device_name);
2555 return -EFAULT;
2556 }
2557
2558 return 0;
2559
2560} /* end of mgsl_get_txidle() */
2561
2562/* mgsl_set_txidle() service ioctl to set transmit idle mode
2563 *
2564 * Arguments: info pointer to device instance data
2565 * idle_mode new idle mode
2566 *
2567 * Return Value: 0 if success, otherwise error code
2568 */
2569static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2570{
2571 unsigned long flags;
2572
2573 if (debug_level >= DEBUG_LEVEL_INFO)
2574 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2575 info->device_name, idle_mode );
2576
2577 spin_lock_irqsave(&info->irq_spinlock,flags);
2578 info->idle_mode = idle_mode;
2579 usc_set_txidle( info );
2580 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2581 return 0;
2582
2583} /* end of mgsl_set_txidle() */
2584
2585/* mgsl_txenable()
2586 *
2587 * enable or disable the transmitter
2588 *
2589 * Arguments:
2590 *
2591 * info pointer to device instance data
2592 * enable 1 = enable, 0 = disable
2593 *
2594 * Return Value: 0 if success, otherwise error code
2595 */
2596static int mgsl_txenable(struct mgsl_struct * info, int enable)
2597{
2598 unsigned long flags;
2599
2600 if (debug_level >= DEBUG_LEVEL_INFO)
2601 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2602 info->device_name, enable);
2603
2604 spin_lock_irqsave(&info->irq_spinlock,flags);
2605 if ( enable ) {
2606 if ( !info->tx_enabled ) {
2607
2608 usc_start_transmitter(info);
2609 /*--------------------------------------------------
2610 * if HDLC/SDLC Loop mode, attempt to insert the
2611 * station in the 'loop' by setting CMR:13. Upon
2612 * receipt of the next GoAhead (RxAbort) sequence,
2613 * the OnLoop indicator (CCSR:7) should go active
2614 * to indicate that we are on the loop
2615 *--------------------------------------------------*/
2616 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2617 usc_loopmode_insert_request( info );
2618 }
2619 } else {
2620 if ( info->tx_enabled )
2621 usc_stop_transmitter(info);
2622 }
2623 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2624 return 0;
2625
2626} /* end of mgsl_txenable() */
2627
2628/* mgsl_txabort() abort send HDLC frame
2629 *
2630 * Arguments: info pointer to device instance data
2631 * Return Value: 0 if success, otherwise error code
2632 */
2633static int mgsl_txabort(struct mgsl_struct * info)
2634{
2635 unsigned long flags;
2636
2637 if (debug_level >= DEBUG_LEVEL_INFO)
2638 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2639 info->device_name);
2640
2641 spin_lock_irqsave(&info->irq_spinlock,flags);
2642 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2643 {
2644 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2645 usc_loopmode_cancel_transmit( info );
2646 else
2647 usc_TCmd(info,TCmd_SendAbort);
2648 }
2649 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2650 return 0;
2651
2652} /* end of mgsl_txabort() */
2653
2654/* mgsl_rxenable() enable or disable the receiver
2655 *
2656 * Arguments: info pointer to device instance data
2657 * enable 1 = enable, 0 = disable
2658 * Return Value: 0 if success, otherwise error code
2659 */
2660static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2661{
2662 unsigned long flags;
2663
2664 if (debug_level >= DEBUG_LEVEL_INFO)
2665 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2666 info->device_name, enable);
2667
2668 spin_lock_irqsave(&info->irq_spinlock,flags);
2669 if ( enable ) {
2670 if ( !info->rx_enabled )
2671 usc_start_receiver(info);
2672 } else {
2673 if ( info->rx_enabled )
2674 usc_stop_receiver(info);
2675 }
2676 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2677 return 0;
2678
2679} /* end of mgsl_rxenable() */
2680
2681/* mgsl_wait_event() wait for specified event to occur
2682 *
2683 * Arguments: info pointer to device instance data
2684 * mask pointer to bitmask of events to wait for
2685 * Return Value: 0 if successful, with the bit mask updated to
2686 * reflect the events that triggered;
2687 * otherwise error code
2688 */
2689static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2690{
2691 unsigned long flags;
2692 int s;
2693 int rc=0;
2694 struct mgsl_icount cprev, cnow;
2695 int events;
2696 int mask;
2697 struct _input_signal_events oldsigs, newsigs;
2698 DECLARE_WAITQUEUE(wait, current);
2699
2700 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2701 if (rc) {
2702 return -EFAULT;
2703 }
2704
2705 if (debug_level >= DEBUG_LEVEL_INFO)
2706 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2707 info->device_name, mask);
2708
2709 spin_lock_irqsave(&info->irq_spinlock,flags);
2710
2711 /* return immediately if state matches requested events */
2712 usc_get_serial_signals(info);
2713 s = info->serial_signals;
2714 events = mask &
2715 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2716 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2717 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2718 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2719 if (events) {
2720 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2721 goto exit;
2722 }
2723
2724 /* save current irq counts */
2725 cprev = info->icount;
2726 oldsigs = info->input_signal_events;
2727
2728 /* enable hunt and idle irqs if needed */
2729 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2730 u16 oldreg = usc_InReg(info,RICR);
2731 u16 newreg = oldreg +
2732 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2733 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2734 if (oldreg != newreg)
2735 usc_OutReg(info, RICR, newreg);
2736 }
2737
2738 set_current_state(TASK_INTERRUPTIBLE);
2739 add_wait_queue(&info->event_wait_q, &wait);
2740
2741 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2742
2743
2744 for(;;) {
2745 schedule();
2746 if (signal_pending(current)) {
2747 rc = -ERESTARTSYS;
2748 break;
2749 }
2750
2751 /* get current irq counts */
2752 spin_lock_irqsave(&info->irq_spinlock,flags);
2753 cnow = info->icount;
2754 newsigs = info->input_signal_events;
2755 set_current_state(TASK_INTERRUPTIBLE);
2756 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2757
2758 /* if no change, wait aborted for some reason */
2759 if (newsigs.dsr_up == oldsigs.dsr_up &&
2760 newsigs.dsr_down == oldsigs.dsr_down &&
2761 newsigs.dcd_up == oldsigs.dcd_up &&
2762 newsigs.dcd_down == oldsigs.dcd_down &&
2763 newsigs.cts_up == oldsigs.cts_up &&
2764 newsigs.cts_down == oldsigs.cts_down &&
2765 newsigs.ri_up == oldsigs.ri_up &&
2766 newsigs.ri_down == oldsigs.ri_down &&
2767 cnow.exithunt == cprev.exithunt &&
2768 cnow.rxidle == cprev.rxidle) {
2769 rc = -EIO;
2770 break;
2771 }
2772
2773 events = mask &
2774 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2775 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2776 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2777 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2778 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2779 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2780 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2781 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2782 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2783 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2784 if (events)
2785 break;
2786
2787 cprev = cnow;
2788 oldsigs = newsigs;
2789 }
2790
2791 remove_wait_queue(&info->event_wait_q, &wait);
2792 set_current_state(TASK_RUNNING);
2793
2794 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2795 spin_lock_irqsave(&info->irq_spinlock,flags);
2796 if (!waitqueue_active(&info->event_wait_q)) {
2797			/* disable exit hunt mode/idle received IRQs */
2798 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2799 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2800 }
2801 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2802 }
2803exit:
2804 if ( rc == 0 )
2805 PUT_USER(rc, events, mask_ptr);
2806
2807 return rc;
2808
2809} /* end of mgsl_wait_event() */
2810
2811static int modem_input_wait(struct mgsl_struct *info,int arg)
2812{
2813 unsigned long flags;
2814 int rc;
2815 struct mgsl_icount cprev, cnow;
2816 DECLARE_WAITQUEUE(wait, current);
2817
2818 /* save current irq counts */
2819 spin_lock_irqsave(&info->irq_spinlock,flags);
2820 cprev = info->icount;
2821 add_wait_queue(&info->status_event_wait_q, &wait);
2822 set_current_state(TASK_INTERRUPTIBLE);
2823 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2824
2825 for(;;) {
2826 schedule();
2827 if (signal_pending(current)) {
2828 rc = -ERESTARTSYS;
2829 break;
2830 }
2831
2832 /* get new irq counts */
2833 spin_lock_irqsave(&info->irq_spinlock,flags);
2834 cnow = info->icount;
2835 set_current_state(TASK_INTERRUPTIBLE);
2836 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2837
2838 /* if no change, wait aborted for some reason */
2839 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2840 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2841 rc = -EIO;
2842 break;
2843 }
2844
2845 /* check for change in caller specified modem input */
2846 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2847 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2848 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2849 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2850 rc = 0;
2851 break;
2852 }
2853
2854 cprev = cnow;
2855 }
2856 remove_wait_queue(&info->status_event_wait_q, &wait);
2857 set_current_state(TASK_RUNNING);
2858 return rc;
2859}
2860
2861/* return the state of the serial control and status signals
2862 */
2863static int tiocmget(struct tty_struct *tty, struct file *file)
2864{
2865 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2866 unsigned int result;
2867 unsigned long flags;
2868
2869 spin_lock_irqsave(&info->irq_spinlock,flags);
2870 usc_get_serial_signals(info);
2871 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2872
2873 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2874 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2875 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2876 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2877 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2878 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2879
2880 if (debug_level >= DEBUG_LEVEL_INFO)
2881 printk("%s(%d):%s tiocmget() value=%08X\n",
2882 __FILE__,__LINE__, info->device_name, result );
2883 return result;
2884}
2885
2886/* set modem control signals (DTR/RTS)
2887 */
2888static int tiocmset(struct tty_struct *tty, struct file *file,
2889 unsigned int set, unsigned int clear)
2890{
2891 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2892 unsigned long flags;
2893
2894 if (debug_level >= DEBUG_LEVEL_INFO)
2895 printk("%s(%d):%s tiocmset(%x,%x)\n",
2896 __FILE__,__LINE__,info->device_name, set, clear);
2897
2898 if (set & TIOCM_RTS)
2899 info->serial_signals |= SerialSignal_RTS;
2900 if (set & TIOCM_DTR)
2901 info->serial_signals |= SerialSignal_DTR;
2902 if (clear & TIOCM_RTS)
2903 info->serial_signals &= ~SerialSignal_RTS;
2904 if (clear & TIOCM_DTR)
2905 info->serial_signals &= ~SerialSignal_DTR;
2906
2907 spin_lock_irqsave(&info->irq_spinlock,flags);
2908 usc_set_serial_signals(info);
2909 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2910
2911 return 0;
2912}
2913
2914/* mgsl_break() Set or clear transmit break condition
2915 *
2916 * Arguments: tty pointer to tty instance data
2917 * break_state -1=set break condition, 0=clear
2918 * Return Value: None
2919 */
2920static void mgsl_break(struct tty_struct *tty, int break_state)
2921{
2922 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2923 unsigned long flags;
2924
2925 if (debug_level >= DEBUG_LEVEL_INFO)
2926 printk("%s(%d):mgsl_break(%s,%d)\n",
2927 __FILE__,__LINE__, info->device_name, break_state);
2928
2929 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2930 return;
2931
2932 spin_lock_irqsave(&info->irq_spinlock,flags);
2933 if (break_state == -1)
2934 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2935 else
2936 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2937 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2938
2939} /* end of mgsl_break() */
2940
2941/* mgsl_ioctl() Service an IOCTL request
2942 *
2943 * Arguments:
2944 *
2945 * tty pointer to tty instance data
2946 * file pointer to associated file object for device
2947 * cmd IOCTL command code
2948 * arg command argument/context
2949 *
2950 * Return Value: 0 if success, otherwise error code
2951 */
2952static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2953 unsigned int cmd, unsigned long arg)
2954{
2955 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2956
2957 if (debug_level >= DEBUG_LEVEL_INFO)
2958 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2959 info->device_name, cmd );
2960
2961 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2962 return -ENODEV;
2963
2964 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2965 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2966 if (tty->flags & (1 << TTY_IO_ERROR))
2967 return -EIO;
2968 }
2969
2970 return mgsl_ioctl_common(info, cmd, arg);
2971}
2972
2973static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2974{
2975 int error;
2976 struct mgsl_icount cnow; /* kernel counter temps */
2977 void __user *argp = (void __user *)arg;
2978 struct serial_icounter_struct __user *p_cuser; /* user space */
2979 unsigned long flags;
2980
2981 switch (cmd) {
2982 case MGSL_IOCGPARAMS:
2983 return mgsl_get_params(info, argp);
2984 case MGSL_IOCSPARAMS:
2985 return mgsl_set_params(info, argp);
2986 case MGSL_IOCGTXIDLE:
2987 return mgsl_get_txidle(info, argp);
2988 case MGSL_IOCSTXIDLE:
2989 return mgsl_set_txidle(info,(int)arg);
2990 case MGSL_IOCTXENABLE:
2991 return mgsl_txenable(info,(int)arg);
2992 case MGSL_IOCRXENABLE:
2993 return mgsl_rxenable(info,(int)arg);
2994 case MGSL_IOCTXABORT:
2995 return mgsl_txabort(info);
2996 case MGSL_IOCGSTATS:
2997 return mgsl_get_stats(info, argp);
2998 case MGSL_IOCWAITEVENT:
2999 return mgsl_wait_event(info, argp);
3000 case MGSL_IOCLOOPTXDONE:
3001 return mgsl_loopmode_send_done(info);
3002 /* Wait for modem input (DCD,RI,DSR,CTS) change
3003 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3004 */
3005 case TIOCMIWAIT:
3006 return modem_input_wait(info,(int)arg);
3007
3008 /*
3009 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3010 * Return: write counters to the user passed counter struct
3011 * NB: both 1->0 and 0->1 transitions are counted except for
3012 * RI where only 0->1 is counted.
3013 */
3014 case TIOCGICOUNT:
3015 spin_lock_irqsave(&info->irq_spinlock,flags);
3016 cnow = info->icount;
3017 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3018 p_cuser = argp;
3019 PUT_USER(error,cnow.cts, &p_cuser->cts);
3020 if (error) return error;
3021 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3022 if (error) return error;
3023 PUT_USER(error,cnow.rng, &p_cuser->rng);
3024 if (error) return error;
3025 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3026 if (error) return error;
3027 PUT_USER(error,cnow.rx, &p_cuser->rx);
3028 if (error) return error;
3029 PUT_USER(error,cnow.tx, &p_cuser->tx);
3030 if (error) return error;
3031 PUT_USER(error,cnow.frame, &p_cuser->frame);
3032 if (error) return error;
3033 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3034 if (error) return error;
3035 PUT_USER(error,cnow.parity, &p_cuser->parity);
3036 if (error) return error;
3037 PUT_USER(error,cnow.brk, &p_cuser->brk);
3038 if (error) return error;
3039 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3040 if (error) return error;
3041 return 0;
3042 default:
3043 return -ENOIOCTLCMD;
3044 }
3045 return 0;
3046}
3047
3048/* mgsl_set_termios()
3049 *
3050 * Set new termios settings
3051 *
3052 * Arguments:
3053 *
3054 * tty pointer to tty structure
3055 * old_termios pointer to the previous termios settings
3056 *
3057 * Return Value: None
3058 */
3059static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3060{
3061 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3062 unsigned long flags;
3063
3064 if (debug_level >= DEBUG_LEVEL_INFO)
3065 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3066 tty->driver->name );
3067
3068 /* just return if nothing has changed */
3069 if ((tty->termios->c_cflag == old_termios->c_cflag)
3070 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3071 == RELEVANT_IFLAG(old_termios->c_iflag)))
3072 return;
3073
3074 mgsl_change_params(info);
3075
3076 /* Handle transition to B0 status */
3077 if (old_termios->c_cflag & CBAUD &&
3078 !(tty->termios->c_cflag & CBAUD)) {
3079 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3080 spin_lock_irqsave(&info->irq_spinlock,flags);
3081 usc_set_serial_signals(info);
3082 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3083 }
3084
3085 /* Handle transition away from B0 status */
3086 if (!(old_termios->c_cflag & CBAUD) &&
3087 tty->termios->c_cflag & CBAUD) {
3088 info->serial_signals |= SerialSignal_DTR;
3089 if (!(tty->termios->c_cflag & CRTSCTS) ||
3090 !test_bit(TTY_THROTTLED, &tty->flags)) {
3091 info->serial_signals |= SerialSignal_RTS;
3092 }
3093 spin_lock_irqsave(&info->irq_spinlock,flags);
3094 usc_set_serial_signals(info);
3095 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3096 }
3097
3098 /* Handle turning off CRTSCTS */
3099 if (old_termios->c_cflag & CRTSCTS &&
3100 !(tty->termios->c_cflag & CRTSCTS)) {
3101 tty->hw_stopped = 0;
3102 mgsl_start(tty);
3103 }
3104
3105} /* end of mgsl_set_termios() */
3106
3107/* mgsl_close()
3108 *
3109 * Called when port is closed. Wait for remaining data to be
3110 * sent. Disable port and free resources.
3111 *
3112 * Arguments:
3113 *
3114 * tty pointer to open tty structure
3115 * filp pointer to open file object
3116 *
3117 * Return Value: None
3118 */
3119static void mgsl_close(struct tty_struct *tty, struct file * filp)
3120{
3121 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3122
3123 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3124 return;
3125
3126 if (debug_level >= DEBUG_LEVEL_INFO)
3127 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3128 __FILE__,__LINE__, info->device_name, info->count);
3129
3130 if (!info->count)
3131 return;
3132
3133 if (tty_hung_up_p(filp))
3134 goto cleanup;
3135
3136 if ((tty->count == 1) && (info->count != 1)) {
3137 /*
3138 * tty->count is 1 and the tty structure will be freed.
3139 * info->count should be one in this case.
3140 * if it's not, correct it so that the port is shutdown.
3141 */
3142 printk("mgsl_close: bad refcount; tty->count is 1, "
3143 "info->count is %d\n", info->count);
3144 info->count = 1;
3145 }
3146
3147 info->count--;
3148
3149 /* if at least one open remaining, leave hardware active */
3150 if (info->count)
3151 goto cleanup;
3152
3153 info->flags |= ASYNC_CLOSING;
3154
3155 /* set tty->closing to notify line discipline to
3156 * only process XON/XOFF characters. Only the N_TTY
3157 * discipline appears to use this (ppp does not).
3158 */
3159 tty->closing = 1;
3160
3161 /* wait for transmit data to clear all layers */
3162
3163 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3164 if (debug_level >= DEBUG_LEVEL_INFO)
3165 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3166 __FILE__,__LINE__, info->device_name );
3167 tty_wait_until_sent(tty, info->closing_wait);
3168 }
3169
3170 if (info->flags & ASYNC_INITIALIZED)
3171 mgsl_wait_until_sent(tty, info->timeout);
3172
3173 if (tty->driver->flush_buffer)
3174 tty->driver->flush_buffer(tty);
3175
3176 tty_ldisc_flush(tty);
3177
3178 shutdown(info);
3179
3180 tty->closing = 0;
3181 info->tty = NULL;
3182
3183 if (info->blocked_open) {
3184 if (info->close_delay) {
3185 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3186 }
3187 wake_up_interruptible(&info->open_wait);
3188 }
3189
3190 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3191
3192 wake_up_interruptible(&info->close_wait);
3193
3194cleanup:
3195 if (debug_level >= DEBUG_LEVEL_INFO)
3196 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3197 tty->driver->name, info->count);
3198
3199} /* end of mgsl_close() */
3200
3201/* mgsl_wait_until_sent()
3202 *
3203 * Wait until the transmitter is empty.
3204 *
3205 * Arguments:
3206 *
3207 * tty pointer to tty info structure
3208 * timeout time to wait for send completion
3209 *
3210 * Return Value: None
3211 */
3212static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3213{
3214 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3215 unsigned long orig_jiffies, char_time;
3216
3217 if (!info )
3218 return;
3219
3220 if (debug_level >= DEBUG_LEVEL_INFO)
3221 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3222 __FILE__,__LINE__, info->device_name );
3223
3224 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3225 return;
3226
3227 if (!(info->flags & ASYNC_INITIALIZED))
3228 goto exit;
3229
3230 orig_jiffies = jiffies;
3231
3232 /* Set check interval to 1/5 of estimated time to
3233 * send a character, and make it at least 1. The check
3234 * interval should also be less than the timeout.
3235 * Note: use tight timings here to satisfy the NIST-PCTS.
3236 */
3237
3238 if ( info->params.data_rate ) {
3239 char_time = info->timeout/(32 * 5);
3240 if (!char_time)
3241 char_time++;
3242 } else
3243 char_time = 1;
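	/* info->timeout covers a full 32-byte FIFO, so timeout/(32*5) is
	 * roughly one fifth of a single character time, in jiffies */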
3244
3245 if (timeout)
3246 char_time = min_t(unsigned long, char_time, timeout);
3247
3248 if ( info->params.mode == MGSL_MODE_HDLC ||
3249 info->params.mode == MGSL_MODE_RAW ) {
3250 while (info->tx_active) {
3251 msleep_interruptible(jiffies_to_msecs(char_time));
3252 if (signal_pending(current))
3253 break;
3254 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3255 break;
3256 }
3257 } else {
3258 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3259 info->tx_enabled) {
3260 msleep_interruptible(jiffies_to_msecs(char_time));
3261 if (signal_pending(current))
3262 break;
3263 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3264 break;
3265 }
3266 }
3267
3268exit:
3269 if (debug_level >= DEBUG_LEVEL_INFO)
3270 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3271 __FILE__,__LINE__, info->device_name );
3272
3273} /* end of mgsl_wait_until_sent() */
3274
3275/* mgsl_hangup()
3276 *
3277 * Called by tty_hangup() when a hangup is signaled.
3278 * This is the same as closing all open files for the port.
3279 *
3280 * Arguments: tty pointer to associated tty object
3281 * Return Value: None
3282 */
3283static void mgsl_hangup(struct tty_struct *tty)
3284{
3285 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3286
3287 if (debug_level >= DEBUG_LEVEL_INFO)
3288 printk("%s(%d):mgsl_hangup(%s)\n",
3289 __FILE__,__LINE__, info->device_name );
3290
3291 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3292 return;
3293
3294 mgsl_flush_buffer(tty);
3295 shutdown(info);
3296
3297 info->count = 0;
3298 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3299 info->tty = NULL;
3300
3301 wake_up_interruptible(&info->open_wait);
3302
3303} /* end of mgsl_hangup() */
3304
3305/* block_til_ready()
3306 *
3307 * Block the current process until the specified port
3308 * is ready to be opened.
3309 *
3310 * Arguments:
3311 *
3312 * tty pointer to tty info structure
3313 * filp pointer to open file object
3314 * info pointer to device instance data
3315 *
3316 * Return Value: 0 if success, otherwise error code
3317 */
3318static int block_til_ready(struct tty_struct *tty, struct file * filp,
3319 struct mgsl_struct *info)
3320{
3321 DECLARE_WAITQUEUE(wait, current);
3322 int retval;
3323 int do_clocal = 0, extra_count = 0;
3324 unsigned long flags;
3325
3326 if (debug_level >= DEBUG_LEVEL_INFO)
3327 printk("%s(%d):block_til_ready on %s\n",
3328 __FILE__,__LINE__, tty->driver->name );
3329
3330 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3331 /* nonblock mode is set or port is not enabled */
3332 info->flags |= ASYNC_NORMAL_ACTIVE;
3333 return 0;
3334 }
3335
3336 if (tty->termios->c_cflag & CLOCAL)
3337 do_clocal = 1;
3338
3339 /* Wait for carrier detect and the line to become
3340 * free (i.e., not in use by the callout). While we are in
3341 * this loop, info->count is dropped by one, so that
3342 * mgsl_close() knows when to free things. We restore it upon
3343 * exit, either normal or abnormal.
3344 */
3345
3346 retval = 0;
3347 add_wait_queue(&info->open_wait, &wait);
3348
3349 if (debug_level >= DEBUG_LEVEL_INFO)
3350 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3351 __FILE__,__LINE__, tty->driver->name, info->count );
3352
3353 spin_lock_irqsave(&info->irq_spinlock, flags);
3354 if (!tty_hung_up_p(filp)) {
3355 extra_count = 1;
3356 info->count--;
3357 }
3358 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3359 info->blocked_open++;
3360
3361 while (1) {
3362 if (tty->termios->c_cflag & CBAUD) {
3363 spin_lock_irqsave(&info->irq_spinlock,flags);
3364 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3365 usc_set_serial_signals(info);
3366 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3367 }
3368
3369 set_current_state(TASK_INTERRUPTIBLE);
3370
3371 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3372 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3373 -EAGAIN : -ERESTARTSYS;
3374 break;
3375 }
3376
3377 spin_lock_irqsave(&info->irq_spinlock,flags);
3378 usc_get_serial_signals(info);
3379 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3380
3381 if (!(info->flags & ASYNC_CLOSING) &&
3382 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3383 break;
3384 }
3385
3386 if (signal_pending(current)) {
3387 retval = -ERESTARTSYS;
3388 break;
3389 }
3390
3391 if (debug_level >= DEBUG_LEVEL_INFO)
3392 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3393 __FILE__,__LINE__, tty->driver->name, info->count );
3394
3395 schedule();
3396 }
3397
3398 set_current_state(TASK_RUNNING);
3399 remove_wait_queue(&info->open_wait, &wait);
3400
3401 if (extra_count)
3402 info->count++;
3403 info->blocked_open--;
3404
3405 if (debug_level >= DEBUG_LEVEL_INFO)
3406 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3407 __FILE__,__LINE__, tty->driver->name, info->count );
3408
3409 if (!retval)
3410 info->flags |= ASYNC_NORMAL_ACTIVE;
3411
3412 return retval;
3413
3414} /* end of block_til_ready() */
3415
3416/* mgsl_open()
3417 *
3418 * Called when a port is opened. Init and enable port.
3419 * Perform serial-specific initialization for the tty structure.
3420 *
3421 * Arguments: tty pointer to tty info structure
3422 * filp associated file pointer
3423 *
3424 * Return Value: 0 if success, otherwise error code
3425 */
3426static int mgsl_open(struct tty_struct *tty, struct file * filp)
3427{
3428 struct mgsl_struct *info;
3429 int retval, line;
3430	unsigned long flags;
3431
3432 /* verify range of specified line number */
3433 line = tty->index;
3434 if ((line < 0) || (line >= mgsl_device_count)) {
3435 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3436 __FILE__,__LINE__,line);
3437 return -ENODEV;
3438 }
3439
3440 /* find the info structure for the specified line */
3441 info = mgsl_device_list;
3442 while(info && info->line != line)
3443 info = info->next_device;
3444 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3445 return -ENODEV;
3446
3447 tty->driver_data = info;
3448 info->tty = tty;
3449
3450 if (debug_level >= DEBUG_LEVEL_INFO)
3451 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3452 __FILE__,__LINE__,tty->driver->name, info->count);
3453
3454 /* If port is closing, signal caller to try again */
3455 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3456 if (info->flags & ASYNC_CLOSING)
3457 interruptible_sleep_on(&info->close_wait);
3458 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3459 -EAGAIN : -ERESTARTSYS);
3460 goto cleanup;
3461 }
3462
3463	info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3464
3465 spin_lock_irqsave(&info->netlock, flags);
3466 if (info->netcount) {
3467 retval = -EBUSY;
3468 spin_unlock_irqrestore(&info->netlock, flags);
3469 goto cleanup;
3470 }
3471 info->count++;
3472 spin_unlock_irqrestore(&info->netlock, flags);
3473
3474 if (info->count == 1) {
3475 /* 1st open on this device, init hardware */
3476 retval = startup(info);
3477 if (retval < 0)
3478 goto cleanup;
3479 }
3480
3481 retval = block_til_ready(tty, filp, info);
3482 if (retval) {
3483 if (debug_level >= DEBUG_LEVEL_INFO)
3484 printk("%s(%d):block_til_ready(%s) returned %d\n",
3485 __FILE__,__LINE__, info->device_name, retval);
3486 goto cleanup;
3487 }
3488
3489 if (debug_level >= DEBUG_LEVEL_INFO)
3490 printk("%s(%d):mgsl_open(%s) success\n",
3491 __FILE__,__LINE__, info->device_name);
3492 retval = 0;
3493
3494cleanup:
3495 if (retval) {
3496 if (tty->count == 1)
3497 info->tty = NULL; /* tty layer will release tty struct */
3498 if(info->count)
3499 info->count--;
3500 }
3501
3502 return retval;
3503
3504} /* end of mgsl_open() */
3505
3506/*
3507 * /proc fs routines....
3508 */
3509
3510static inline int line_info(char *buf, struct mgsl_struct *info)
3511{
3512 char stat_buf[30];
3513 int ret;
3514 unsigned long flags;
3515
3516 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3517 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3518 info->device_name, info->io_base, info->irq_level,
3519 info->phys_memory_base, info->phys_lcr_base);
3520 } else {
3521 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3522 info->device_name, info->io_base,
3523 info->irq_level, info->dma_level);
3524 }
3525
3526 /* output current serial signal states */
3527 spin_lock_irqsave(&info->irq_spinlock,flags);
3528 usc_get_serial_signals(info);
3529 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3530
3531 stat_buf[0] = 0;
3532 stat_buf[1] = 0;
3533 if (info->serial_signals & SerialSignal_RTS)
3534 strcat(stat_buf, "|RTS");
3535 if (info->serial_signals & SerialSignal_CTS)
3536 strcat(stat_buf, "|CTS");
3537 if (info->serial_signals & SerialSignal_DTR)
3538 strcat(stat_buf, "|DTR");
3539 if (info->serial_signals & SerialSignal_DSR)
3540 strcat(stat_buf, "|DSR");
3541 if (info->serial_signals & SerialSignal_DCD)
3542 strcat(stat_buf, "|CD");
3543 if (info->serial_signals & SerialSignal_RI)
3544 strcat(stat_buf, "|RI");
3545
3546 if (info->params.mode == MGSL_MODE_HDLC ||
3547 info->params.mode == MGSL_MODE_RAW ) {
3548 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3549 info->icount.txok, info->icount.rxok);
3550 if (info->icount.txunder)
3551 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3552 if (info->icount.txabort)
3553 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3554 if (info->icount.rxshort)
3555 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3556 if (info->icount.rxlong)
3557 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3558 if (info->icount.rxover)
3559 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3560 if (info->icount.rxcrc)
3561 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3562 } else {
3563 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3564 info->icount.tx, info->icount.rx);
3565 if (info->icount.frame)
3566 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3567 if (info->icount.parity)
3568 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3569 if (info->icount.brk)
3570 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3571 if (info->icount.overrun)
3572 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3573 }
3574
3575 /* Append serial signal status to end */
3576 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3577
3578 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3579 info->tx_active,info->bh_requested,info->bh_running,
3580 info->pending_bh);
3581
3582 spin_lock_irqsave(&info->irq_spinlock,flags);
3583 {
3584 u16 Tcsr = usc_InReg( info, TCSR );
3585 u16 Tdmr = usc_InDmaReg( info, TDMR );
3586 u16 Ticr = usc_InReg( info, TICR );
3587 u16 Rscr = usc_InReg( info, RCSR );
3588 u16 Rdmr = usc_InDmaReg( info, RDMR );
3589 u16 Ricr = usc_InReg( info, RICR );
3590 u16 Icr = usc_InReg( info, ICR );
3591 u16 Dccr = usc_InReg( info, DCCR );
3592 u16 Tmr = usc_InReg( info, TMR );
3593 u16 Tccr = usc_InReg( info, TCCR );
3594 u16 Ccar = inw( info->io_base + CCAR );
3595 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3596 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3597 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3598 }
3599 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3600
3601 return ret;
3602
3603} /* end of line_info() */
3604
3605/* mgsl_read_proc()
3606 *
3607 * Called to print information about devices
3608 *
3609 * Arguments:
3610 * page page of memory to hold returned info
3611 * start
3612 * off
3613 * count
3614 * eof
3615 * data
3616 *
3617 * Return Value:
3618 */
3619static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3620 int *eof, void *data)
3621{
3622 int len = 0, l;
3623 off_t begin = 0;
3624 struct mgsl_struct *info;
3625
3626 len += sprintf(page, "synclink driver:%s\n", driver_version);
3627
3628 info = mgsl_device_list;
3629 while( info ) {
3630 l = line_info(page + len, info);
3631 len += l;
3632 if (len+begin > off+count)
3633 goto done;
3634 if (len+begin < off) {
3635 begin += len;
3636 len = 0;
3637 }
3638 info = info->next_device;
3639 }
3640
3641 *eof = 1;
3642done:
3643 if (off >= len+begin)
3644 return 0;
3645 *start = page + (off-begin);
3646 return ((count < begin+len-off) ? count : begin+len-off);
3647
3648} /* end of mgsl_read_proc() */
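/*
 * Editorial note (illustrative): mgsl_read_proc() is installed as the tty
 * driver's read_proc entry (see mgsl_ops below), so its output is normally
 * visible through procfs; for a tty driver named "synclink" the
 * conventional path is /proc/tty/driver/synclink (path assumed, verify on
 * the target system):
 *
 *   cat /proc/tty/driver/synclink
 *
 * The first line reports the driver version, followed by one line_info()
 * block per adapter.
 */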
3649
3650/* mgsl_allocate_dma_buffers()
3651 *
3652 * Allocate and format DMA buffers (ISA adapter)
3653 * or format shared memory buffers (PCI adapter).
3654 *
3655 * Arguments: info pointer to device instance data
3656 * Return Value: 0 if success, otherwise error
3657 */
3658static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3659{
3660 unsigned short BuffersPerFrame;
3661
3662 info->last_mem_alloc = 0;
3663
3664 /* Calculate the number of DMA buffers necessary to hold the */
3665 /* largest allowable frame size. Note: If the max frame size is */
3666 /* not an even multiple of the DMA buffer size then we need to */
3667 /* round the buffer count per frame up one. */
3668
3669 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3670 if ( info->max_frame_size % DMABUFFERSIZE )
3671 BuffersPerFrame++;
3672
3673 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3674 /*
3675 * The PCI adapter has 256KBytes of shared memory to use.
3676 * This is 64 PAGE_SIZE buffers.
3677 *
3678 * The first page is used for padding at this time so the
3679 * buffer list does not begin at offset 0 of the PCI
3680 * adapter's shared memory.
3681 *
3682 * The 2nd page is used for the buffer list. A 4K buffer
3683 * list can hold 128 DMA_BUFFER structures at 32 bytes
3684 * each.
3685 *
3686 * This leaves 62 4K pages.
3687 *
3688 * The next N pages are used for transmit frame(s). We
3689 * reserve enough 4K page blocks to hold the required
3690 * number of transmit dma buffers (num_tx_dma_buffers),
3691 * each of MaxFrameSize size.
3692 *
3693 * Of the remaining pages (62-N), determine how many can
3694 * be used to receive full MaxFrameSize inbound frames
3695 */
3696 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3697 info->rx_buffer_count = 62 - info->tx_buffer_count;
3698 } else {
3699 /* Calculate the number of PAGE_SIZE buffers needed for */
3700 /* receive and transmit DMA buffers. */
3701
3702
3703 /* Calculate the number of DMA buffers necessary to */
3704 /* hold 7 max size receive frames and one max size transmit frame. */
3705 /* The receive buffer count is bumped by one so we avoid an */
3706 /* End of List condition if all receive buffers are used when */
3707 /* using linked list DMA buffers. */
3708
3709 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3710 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3711
3712 /*
3713 * limit total TxBuffers & RxBuffers to 62 4K total
3714 * (ala PCI Allocation)
3715 */
3716
3717 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3718 info->rx_buffer_count = 62 - info->tx_buffer_count;
3719
3720 }
3721
3722 if ( debug_level >= DEBUG_LEVEL_INFO )
3723 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3724 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3725
3726 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3727 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3728 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3729 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3730 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3731 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3732 return -ENOMEM;
3733 }
3734
3735 mgsl_reset_rx_dma_buffers( info );
3736 mgsl_reset_tx_dma_buffers( info );
3737
3738 return 0;
3739
3740} /* end of mgsl_allocate_dma_buffers() */
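/*
 * Editorial note (worked example, buffer size assumed): with DMABUFFERSIZE
 * equal to one 4K page and max_frame_size raised to its 65535 ceiling,
 *
 *   BuffersPerFrame = 65535 / 4096 = 15, remainder != 0, so 16 buffers.
 *
 * On a PCI adapter with the default num_tx_dma_buffers = 1 this gives
 * tx_buffer_count = 16 and rx_buffer_count = 62 - 16 = 46, i.e. one
 * transmit frame plus two complete receive frames (and change) fit in the
 * 62 usable 4K pages of shared memory.
 */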
3741
3742/*
3743 * mgsl_alloc_buffer_list_memory()
3744 *
3745 * Allocate a common DMA buffer for use as the
3746 * receive and transmit buffer lists.
3747 *
3748 * A buffer list is a set of buffer entries where each entry contains
3749 * a pointer to an actual buffer and a pointer to the next buffer entry
3750 * (plus some other info about the buffer).
3751 *
3752 * The buffer entries for a list are built to form a circular list so
3753 * that when the entire list has been traversed you start back at the
3754 * beginning.
3755 *
3756 * This function allocates memory for just the buffer entries.
3757 * The links (pointer to next entry) are filled in with the physical
3758 * address of the next entry so the adapter can navigate the list
3759 * using bus master DMA. The pointers to the actual buffers are filled
3760 * out later when the actual buffers are allocated.
3761 *
3762 * Arguments: info pointer to device instance data
3763 * Return Value: 0 if success, otherwise error
3764 */
3765static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3766{
3767 unsigned int i;
3768
3769 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3770 /* PCI adapter uses shared memory. */
3771 info->buffer_list = info->memory_base + info->last_mem_alloc;
3772 info->buffer_list_phys = info->last_mem_alloc;
3773 info->last_mem_alloc += BUFFERLISTSIZE;
3774 } else {
3775 /* ISA adapter uses system memory. */
3776 /* The buffer lists are allocated as a common buffer that both */
3777 /* the processor and adapter can access. This allows the driver to */
3778 /* inspect portions of the buffer while other portions are being */
3779 /* updated by the adapter using Bus Master DMA. */
3780
3781 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3782 if (info->buffer_list == NULL)
3783 return -ENOMEM;
3784 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3785 }
3786
3787 /* We got the memory for the buffer entry lists. */
3788 /* Initialize the memory block to all zeros. */
3789 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3790
3791 /* Save virtual address pointers to the receive and */
3792 /* transmit buffer lists. (Receive 1st). These pointers will */
3793 /* be used by the processor to access the lists. */
3794 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3795 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3796 info->tx_buffer_list += info->rx_buffer_count;
3797
3798 /*
3799 * Build the links for the buffer entry lists such that
3800 * two circular lists are built. (Transmit and Receive).
3801 *
3802 * Note: the links are physical addresses
3803 * which are read by the adapter to determine the next
3804 * buffer entry to use.
3805 */
3806
3807 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3808 /* calculate and store physical address of this buffer entry */
3809 info->rx_buffer_list[i].phys_entry =
3810 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3811
3812 /* calculate and store physical address of */
3813 /* next entry in circular list of entries */
3814
3815 info->rx_buffer_list[i].link = info->buffer_list_phys;
3816
3817 if ( i < info->rx_buffer_count - 1 )
3818 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3819 }
3820
3821 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3822 /* calculate and store physical address of this buffer entry */
3823 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3824 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3825
3826 /* calculate and store physical address of */
3827 /* next entry in circular list of entries */
3828
3829 info->tx_buffer_list[i].link = info->buffer_list_phys +
3830 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3831
3832 if ( i < info->tx_buffer_count - 1 )
3833 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3834 }
3835
3836 return 0;
3837
3838} /* end of mgsl_alloc_buffer_list_memory() */
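/*
 * Editorial note (worked example, assuming the 32-byte entry size noted in
 * mgsl_allocate_dma_buffers() above): with rx_buffer_count = 4, the receive
 * entries sit at physical offsets 0, 32, 64 and 96 from buffer_list_phys,
 * and their link fields are 32, 64, 96 and 0, closing the circle back to
 * the first entry. The transmit list starts at offset 4 * 32 = 128 and
 * wraps back to its own first entry the same way.
 */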
3839
3840/* Free DMA buffers allocated for use as the
3841 * receive and transmit buffer lists.
3842 * Warning:
3843 *
3844 * The data transfer buffers associated with the buffer list
3845 * MUST be freed before freeing the buffer list itself because
3846 * the buffer list contains the information necessary to free
3847 * the individual buffers!
3848 */
3849static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3850{
3851 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3852 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3853
3854 info->buffer_list = NULL;
3855 info->rx_buffer_list = NULL;
3856 info->tx_buffer_list = NULL;
3857
3858} /* end of mgsl_free_buffer_list_memory() */
3859
3860/*
3861 * mgsl_alloc_frame_memory()
3862 *
3863 * Allocate the frame DMA buffers used by the specified buffer list.
3864 * Each DMA buffer will be one memory page in size. This is necessary
3865 * because memory can fragment enough that it may be impossible
3866 * to allocate contiguous pages.
3867 *
3868 * Arguments:
3869 *
3870 * info pointer to device instance data
3871 * BufferList pointer to list of buffer entries
3872 * Buffercount count of buffer entries in buffer list
3873 *
3874 * Return Value: 0 if success, otherwise -ENOMEM
3875 */
3876static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3877{
3878 int i;
3879 u32 phys_addr;
3880
3881 /* Allocate page sized buffers for the receive buffer list */
3882
3883 for ( i = 0; i < Buffercount; i++ ) {
3884 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3885 /* PCI adapter uses shared memory buffers. */
3886 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3887 phys_addr = info->last_mem_alloc;
3888 info->last_mem_alloc += DMABUFFERSIZE;
3889 } else {
3890 /* ISA adapter uses system memory. */
3891 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3892 if (BufferList[i].virt_addr == NULL)
3893 return -ENOMEM;
3894 phys_addr = (u32)(BufferList[i].dma_addr);
3895 }
3896 BufferList[i].phys_addr = phys_addr;
3897 }
3898
3899 return 0;
3900
3901} /* end of mgsl_alloc_frame_memory() */
3902
3903/*
3904 * mgsl_free_frame_memory()
3905 *
3906 * Free the buffers associated with
3907 * each buffer entry of a buffer list.
3908 *
3909 * Arguments:
3910 *
3911 * info pointer to device instance data
3912 * BufferList pointer to list of buffer entries
3913 * Buffercount count of buffer entries in buffer list
3914 *
3915 * Return Value: None
3916 */
3917static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3918{
3919 int i;
3920
3921 if ( BufferList ) {
3922 for ( i = 0 ; i < Buffercount ; i++ ) {
3923 if ( BufferList[i].virt_addr ) {
3924 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3925 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3926 BufferList[i].virt_addr = NULL;
3927 }
3928 }
3929 }
3930
3931} /* end of mgsl_free_frame_memory() */
3932
3933/* mgsl_free_dma_buffers()
3934 *
3935 * Free DMA buffers
3936 *
3937 * Arguments: info pointer to device instance data
3938 * Return Value: None
3939 */
3940static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3941{
3942 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3943 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3944 mgsl_free_buffer_list_memory( info );
3945
3946} /* end of mgsl_free_dma_buffers() */
3947
3948
3949/*
3950 * mgsl_alloc_intermediate_rxbuffer_memory()
3951 *
3952 * Allocate a buffer large enough to hold max_frame_size. This buffer
3953 * is used to pass an assembled frame to the line discipline.
3954 *
3955 * Arguments:
3956 *
3957 * info pointer to device instance data
3958 *
3959 * Return Value: 0 if success, otherwise -ENOMEM
3960 */
3961static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3962{
3963 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3964 if ( info->intermediate_rxbuffer == NULL )
3965 return -ENOMEM;
3966
3967 return 0;
3968
3969} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3970
3971/*
3972 * mgsl_free_intermediate_rxbuffer_memory()
3973 *
3974 *
3975 * Arguments:
3976 *
3977 * info pointer to device instance data
3978 *
3979 * Return Value: None
3980 */
3981static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3982{
3983 kfree(info->intermediate_rxbuffer);
3984 info->intermediate_rxbuffer = NULL;
3985
3986} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3987
3988/*
3989 * mgsl_alloc_intermediate_txbuffer_memory()
3990 *
3991 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3992 * This buffer is used to load transmit frames into the adapter's dma transfer
3993 * buffers when there is sufficient space.
3994 *
3995 * Arguments:
3996 *
3997 * info pointer to device instance data
3998 *
3999 * Return Value: 0 if success, otherwise -ENOMEM
4000 */
4001static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4002{
4003 int i;
4004
4005 if ( debug_level >= DEBUG_LEVEL_INFO )
4006 printk("%s %s(%d) allocating %d tx holding buffers\n",
4007 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4008
4009 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4010
4011 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4012 info->tx_holding_buffers[i].buffer =
4013 kmalloc(info->max_frame_size, GFP_KERNEL);
4014 if ( info->tx_holding_buffers[i].buffer == NULL )
4015 return -ENOMEM;
4016 }
4017
4018 return 0;
4019
4020} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4021
4022/*
4023 * mgsl_free_intermediate_txbuffer_memory()
4024 *
4025 *
4026 * Arguments:
4027 *
4028 * info pointer to device instance data
4029 *
4030 * Return Value: None
4031 */
4032static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4033{
4034 int i;
4035
4036 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4037 kfree(info->tx_holding_buffers[i].buffer);
4038 info->tx_holding_buffers[i].buffer = NULL;
4039 }
4040
4041 info->get_tx_holding_index = 0;
4042 info->put_tx_holding_index = 0;
4043 info->tx_holding_count = 0;
4044
4045} /* end of mgsl_free_intermediate_txbuffer_memory() */
4046
4047
4048/*
4049 * load_next_tx_holding_buffer()
4050 *
4051 * attempts to load the next buffered tx request into the
4052 * tx dma buffers
4053 *
4054 * Arguments:
4055 *
4056 * info pointer to device instance data
4057 *
4058 * Return Value: 1 if next buffered tx request loaded
4059 * into adapter's tx dma buffer,
4060 * 0 otherwise
4061 */
4062static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4063{
4064 int ret = 0;
4065
4066 if ( info->tx_holding_count ) {
4067 /* determine if we have enough tx dma buffers
4068 * to accommodate the next tx frame
4069 */
4070 struct tx_holding_buffer *ptx =
4071 &info->tx_holding_buffers[info->get_tx_holding_index];
4072 int num_free = num_free_tx_dma_buffers(info);
4073 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4074 if ( ptx->buffer_size % DMABUFFERSIZE )
4075 ++num_needed;
4076
4077 if (num_needed <= num_free) {
4078 info->xmit_cnt = ptx->buffer_size;
4079 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4080
4081 --info->tx_holding_count;
4082 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4083 info->get_tx_holding_index=0;
4084
4085 /* restart transmit timer */
4086 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4087
4088 ret = 1;
4089 }
4090 }
4091
4092 return ret;
4093}
4094
4095/*
4096 * save_tx_buffer_request()
4097 *
4098 * attempt to store transmit frame request for later transmission
4099 *
4100 * Arguments:
4101 *
4102 * info pointer to device instance data
4103 * Buffer pointer to buffer containing frame to load
4104 * BufferSize size in bytes of frame in Buffer
4105 *
4106 * Return Value: 1 if able to store, 0 otherwise
4107 */
4108static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4109{
4110 struct tx_holding_buffer *ptx;
4111
4112 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4113 return 0; /* all buffers in use */
4114 }
4115
4116 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4117 ptx->buffer_size = BufferSize;
4118 memcpy( ptx->buffer, Buffer, BufferSize);
4119
4120 ++info->tx_holding_count;
4121 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4122 info->put_tx_holding_index=0;
4123
4124 return 1;
4125}
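/*
 * Editorial note: save_tx_buffer_request() and load_next_tx_holding_buffer()
 * together implement a simple circular queue over tx_holding_buffers[].
 * put_tx_holding_index is the producer index, get_tx_holding_index is the
 * consumer index, and tx_holding_count is the number of occupied slots;
 * both indices wrap to 0 when they reach num_tx_holding_buffers.
 */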
4126
4127static int mgsl_claim_resources(struct mgsl_struct *info)
4128{
4129 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4130 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4131 __FILE__,__LINE__,info->device_name, info->io_base);
4132 return -ENODEV;
4133 }
4134 info->io_addr_requested = 1;
4135
4136 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4137 info->device_name, info ) < 0 ) {
4138 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4139 __FILE__,__LINE__,info->device_name, info->irq_level );
4140 goto errout;
4141 }
4142 info->irq_requested = 1;
4143
4144 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4145 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4146 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4147 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4148 goto errout;
4149 }
4150 info->shared_mem_requested = 1;
4151 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4152 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4153 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4154 goto errout;
4155 }
4156 info->lcr_mem_requested = 1;
4157
4158 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4159 if (!info->memory_base) {
4160 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4161 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4162 goto errout;
4163 }
4164
4165 if ( !mgsl_memory_test(info) ) {
4166 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4167 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4168 goto errout;
4169 }
4170
4171 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4172 if (!info->lcr_base) {
4173 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4174 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4175 goto errout;
4176 }
4177
4178 } else {
4179 /* claim DMA channel */
4180
4181 if (request_dma(info->dma_level,info->device_name) < 0){
4182 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4183 __FILE__,__LINE__,info->device_name, info->dma_level );
4184 mgsl_release_resources( info );
4185 return -ENODEV;
4186 }
4187 info->dma_requested = 1;
4188
4189 /* ISA adapter uses bus master DMA */
4190 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4191 enable_dma(info->dma_level);
4192 }
4193
4194 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4195 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4196 __FILE__,__LINE__,info->device_name, info->dma_level );
4197 goto errout;
4198 }
4199
4200 return 0;
4201errout:
4202 mgsl_release_resources(info);
4203 return -ENODEV;
4204
4205} /* end of mgsl_claim_resources() */
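/*
 * Editorial note: resources are claimed in a fixed order (I/O region, IRQ,
 * then the PCI memory regions or the ISA DMA channel, then the DMA
 * buffers). Any failure after the first step jumps to errout, and
 * mgsl_release_resources() below releases only what the corresponding
 * *_requested flag says was actually acquired, so the two functions stay
 * in step.
 */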
4206
4207static void mgsl_release_resources(struct mgsl_struct *info)
4208{
4209 if ( debug_level >= DEBUG_LEVEL_INFO )
4210 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4211 __FILE__,__LINE__,info->device_name );
4212
4213 if ( info->irq_requested ) {
4214 free_irq(info->irq_level, info);
4215 info->irq_requested = 0;
4216 }
4217 if ( info->dma_requested ) {
4218 disable_dma(info->dma_level);
4219 free_dma(info->dma_level);
4220 info->dma_requested = 0;
4221 }
4222 mgsl_free_dma_buffers(info);
4223 mgsl_free_intermediate_rxbuffer_memory(info);
4224 mgsl_free_intermediate_txbuffer_memory(info);
4225
4226 if ( info->io_addr_requested ) {
4227 release_region(info->io_base,info->io_addr_size);
4228 info->io_addr_requested = 0;
4229 }
4230 if ( info->shared_mem_requested ) {
4231 release_mem_region(info->phys_memory_base,0x40000);
4232 info->shared_mem_requested = 0;
4233 }
4234 if ( info->lcr_mem_requested ) {
4235 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4236 info->lcr_mem_requested = 0;
4237 }
4238 if (info->memory_base){
4239 iounmap(info->memory_base);
4240 info->memory_base = NULL;
4241 }
4242 if (info->lcr_base){
4243 iounmap(info->lcr_base - info->lcr_offset);
4244 info->lcr_base = NULL;
4245 }
4246
4247 if ( debug_level >= DEBUG_LEVEL_INFO )
4248 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4249 __FILE__,__LINE__,info->device_name );
4250
4251} /* end of mgsl_release_resources() */
4252
4253/* mgsl_add_device()
4254 *
4255 * Add the specified device instance data structure to the
4256 * global linked list of devices and increment the device count.
4257 *
4258 * Arguments: info pointer to device instance data
4259 * Return Value: None
4260 */
4261static void mgsl_add_device( struct mgsl_struct *info )
4262{
4263 info->next_device = NULL;
4264 info->line = mgsl_device_count;
4265 sprintf(info->device_name,"ttySL%d",info->line);
4266
4267 if (info->line < MAX_TOTAL_DEVICES) {
4268 if (maxframe[info->line])
4269 info->max_frame_size = maxframe[info->line];
4270 info->dosyncppp = dosyncppp[info->line];
4271
4272 if (txdmabufs[info->line]) {
4273 info->num_tx_dma_buffers = txdmabufs[info->line];
4274 if (info->num_tx_dma_buffers < 1)
4275 info->num_tx_dma_buffers = 1;
4276 }
4277
4278 if (txholdbufs[info->line]) {
4279 info->num_tx_holding_buffers = txholdbufs[info->line];
4280 if (info->num_tx_holding_buffers < 1)
4281 info->num_tx_holding_buffers = 1;
4282 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4283 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4284 }
4285 }
4286
4287 mgsl_device_count++;
4288
4289 if ( !mgsl_device_list )
4290 mgsl_device_list = info;
4291 else {
4292 struct mgsl_struct *current_dev = mgsl_device_list;
4293 while( current_dev->next_device )
4294 current_dev = current_dev->next_device;
4295 current_dev->next_device = info;
4296 }
4297
4298 if ( info->max_frame_size < 4096 )
4299 info->max_frame_size = 4096;
4300 else if ( info->max_frame_size > 65535 )
4301 info->max_frame_size = 65535;
4302
4303 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4304 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4305 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4306 info->phys_memory_base, info->phys_lcr_base,
4307 info->max_frame_size );
4308 } else {
4309 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4310 info->device_name, info->io_base, info->irq_level, info->dma_level,
4311 info->max_frame_size );
4312 }
4313
4314#ifdef CONFIG_HDLC
4315 hdlcdev_init(info);
4316#endif
4317
4318} /* end of mgsl_add_device() */
4319
4320/* mgsl_allocate_device()
4321 *
4322 * Allocate and initialize a device instance structure
4323 *
4324 * Arguments: none
4325 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4326 */
4327static struct mgsl_struct* mgsl_allocate_device(void)
4328{
4329 struct mgsl_struct *info;
4330
4331 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4332 GFP_KERNEL);
4333
4334 if (!info) {
4335 printk("Error can't allocate device instance data\n");
4336 } else {
4337 memset(info, 0, sizeof(struct mgsl_struct));
4338 info->magic = MGSL_MAGIC;
4339 INIT_WORK(&info->task, mgsl_bh_handler, info);
4340 info->max_frame_size = 4096;
4341 info->close_delay = 5*HZ/10;
4342 info->closing_wait = 30*HZ;
4343 init_waitqueue_head(&info->open_wait);
4344 init_waitqueue_head(&info->close_wait);
4345 init_waitqueue_head(&info->status_event_wait_q);
4346 init_waitqueue_head(&info->event_wait_q);
4347 spin_lock_init(&info->irq_spinlock);
4348 spin_lock_init(&info->netlock);
4349 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4350 info->idle_mode = HDLC_TXIDLE_FLAGS;
4351 info->num_tx_dma_buffers = 1;
4352 info->num_tx_holding_buffers = 0;
4353 }
4354
4355 return info;
4356
4357} /* end of mgsl_allocate_device()*/
4358
4359static struct tty_operations mgsl_ops = {
4360 .open = mgsl_open,
4361 .close = mgsl_close,
4362 .write = mgsl_write,
4363 .put_char = mgsl_put_char,
4364 .flush_chars = mgsl_flush_chars,
4365 .write_room = mgsl_write_room,
4366 .chars_in_buffer = mgsl_chars_in_buffer,
4367 .flush_buffer = mgsl_flush_buffer,
4368 .ioctl = mgsl_ioctl,
4369 .throttle = mgsl_throttle,
4370 .unthrottle = mgsl_unthrottle,
4371 .send_xchar = mgsl_send_xchar,
4372 .break_ctl = mgsl_break,
4373 .wait_until_sent = mgsl_wait_until_sent,
4374 .read_proc = mgsl_read_proc,
4375 .set_termios = mgsl_set_termios,
4376 .stop = mgsl_stop,
4377 .start = mgsl_start,
4378 .hangup = mgsl_hangup,
4379 .tiocmget = tiocmget,
4380 .tiocmset = tiocmset,
4381};
4382
4383/*
4384 * perform tty device initialization
4385 */
4386static int mgsl_init_tty(void)
4387{
4388 int rc;
4389
4390 serial_driver = alloc_tty_driver(128);
4391 if (!serial_driver)
4392 return -ENOMEM;
4393
4394 serial_driver->owner = THIS_MODULE;
4395 serial_driver->driver_name = "synclink";
4396 serial_driver->name = "ttySL";
4397 serial_driver->major = ttymajor;
4398 serial_driver->minor_start = 64;
4399 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4400 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4401 serial_driver->init_termios = tty_std_termios;
4402 serial_driver->init_termios.c_cflag =
4403 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4404 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4405 tty_set_operations(serial_driver, &mgsl_ops);
4406 if ((rc = tty_register_driver(serial_driver)) < 0) {
4407 printk("%s(%d):Couldn't register serial driver\n",
4408 __FILE__,__LINE__);
4409 put_tty_driver(serial_driver);
4410 serial_driver = NULL;
4411 return rc;
4412 }
4413
4414 printk("%s %s, tty major#%d\n",
4415 driver_name, driver_version,
4416 serial_driver->major);
4417 return 0;
4418}
4419
4420/* enumerate user specified ISA adapters
4421 */
4422static void mgsl_enum_isa_devices(void)
4423{
4424 struct mgsl_struct *info;
4425 int i;
4426
4427 /* Check for user specified ISA devices */
4428
4429 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4430 if ( debug_level >= DEBUG_LEVEL_INFO )
4431 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4432 io[i], irq[i], dma[i] );
4433
4434 info = mgsl_allocate_device();
4435 if ( !info ) {
4436 /* error allocating device instance data */
4437 if ( debug_level >= DEBUG_LEVEL_ERROR )
4438 printk( "can't allocate device instance data.\n");
4439 continue;
4440 }
4441
4442 /* Copy user configuration info to device instance data */
4443 info->io_base = (unsigned int)io[i];
4444 info->irq_level = (unsigned int)irq[i];
4445 info->irq_level = irq_canonicalize(info->irq_level);
4446 info->dma_level = (unsigned int)dma[i];
4447 info->bus_type = MGSL_BUS_TYPE_ISA;
4448 info->io_addr_size = 16;
4449 info->irq_flags = 0;
4450
4451 mgsl_add_device( info );
4452 }
4453}
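/*
 * Editorial note (illustrative, option names assumed to match the io/irq/dma
 * arrays used above): ISA adapters are not probed, so their resources must
 * be supplied when the module is loaded, e.g.
 *
 *   modprobe synclink io=0x300 irq=10 dma=7
 *
 * Entries are consumed in order until an element with io or irq equal to 0
 * is reached (see the loop condition in mgsl_enum_isa_devices()).
 */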
4454
4455static void synclink_cleanup(void)
4456{
4457 int rc;
4458 struct mgsl_struct *info;
4459 struct mgsl_struct *tmp;
4460
4461 printk("Unloading %s: %s\n", driver_name, driver_version);
4462
4463 if (serial_driver) {
4464 if ((rc = tty_unregister_driver(serial_driver)))
4465 printk("%s(%d) failed to unregister tty driver err=%d\n",
4466 __FILE__,__LINE__,rc);
4467 put_tty_driver(serial_driver);
4468 }
4469
4470 info = mgsl_device_list;
4471 while(info) {
4472#ifdef CONFIG_HDLC
4473 hdlcdev_exit(info);
4474#endif
4475 mgsl_release_resources(info);
4476 tmp = info;
4477 info = info->next_device;
4478 kfree(tmp);
4479 }
4480
4481 if (pci_registered)
4482 pci_unregister_driver(&synclink_pci_driver);
4483}
4484
4485static int __init synclink_init(void)
4486{
4487 int rc;
4488
4489 if (break_on_load) {
4490 mgsl_get_text_ptr();
4491 BREAKPOINT();
4492 }
4493
4494 printk("%s %s\n", driver_name, driver_version);
4495
4496 mgsl_enum_isa_devices();
4497 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4498 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4499 else
4500 pci_registered = 1;
4501
4502 if ((rc = mgsl_init_tty()) < 0)
4503 goto error;
4504
4505 return 0;
4506
4507error:
4508 synclink_cleanup();
4509 return rc;
4510}
4511
4512static void __exit synclink_exit(void)
4513{
4514 synclink_cleanup();
4515}
4516
4517module_init(synclink_init);
4518module_exit(synclink_exit);
4519
4520/*
4521 * usc_RTCmd()
4522 *
4523 * Issue a USC Receive/Transmit command to the
4524 * Channel Command/Address Register (CCAR).
4525 *
4526 * Notes:
4527 *
4528 * The command is encoded in the most significant 5 bits <15..11>
4529 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4530 * and Bits <6..0> must be written as zeros.
4531 *
4532 * Arguments:
4533 *
4534 * info pointer to device information structure
4535 * Cmd command mask (use symbolic macros)
4536 *
4537 * Return Value:
4538 *
4539 * None
4540 */
4541static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4542{
4543 /* output command to CCAR in bits <15..11> */
4544 /* preserve bits <10..7>, bits <6..0> must be zero */
4545
4546 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4547
4548 /* Read to flush write to CCAR */
4549 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4550 inw( info->io_base + CCAR );
4551
4552} /* end of usc_RTCmd() */
4553
4554/*
4555 * usc_DmaCmd()
4556 *
4557 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4558 *
4559 * Arguments:
4560 *
4561 * info pointer to device information structure
4562 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4563 *
4564 * Return Value:
4565 *
4566 * None
4567 */
4568static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4569{
4570 /* write command mask to DCAR */
4571 outw( Cmd + info->mbre_bit, info->io_base );
4572
4573 /* Read to flush write to DCAR */
4574 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4575 inw( info->io_base );
4576
4577} /* end of usc_DmaCmd() */
4578
4579/*
4580 * usc_OutDmaReg()
4581 *
4582 * Write a 16-bit value to a USC DMA register
4583 *
4584 * Arguments:
4585 *
4586 * info pointer to device info structure
4587 * RegAddr register address (number) for write
4588 * RegValue 16-bit value to write to register
4589 *
4590 * Return Value:
4591 *
4592 * None
4593 *
4594 */
4595static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4596{
4597 /* Note: The DCAR is located at the adapter base address */
4598 /* Note: must preserve state of BIT8 in DCAR */
4599
4600 outw( RegAddr + info->mbre_bit, info->io_base );
4601 outw( RegValue, info->io_base );
4602
4603 /* Read to flush write to DCAR */
4604 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4605 inw( info->io_base );
4606
4607} /* end of usc_OutDmaReg() */
4608
4609/*
4610 * usc_InDmaReg()
4611 *
4612 * Read a 16-bit value from a DMA register
4613 *
4614 * Arguments:
4615 *
4616 * info pointer to device info structure
4617 * RegAddr register address (number) to read from
4618 *
4619 * Return Value:
4620 *
4621 * The 16-bit value read from register
4622 *
4623 */
4624static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4625{
4626 /* Note: The DCAR is located at the adapter base address */
4627 /* Note: must preserve state of BIT8 in DCAR */
4628
4629 outw( RegAddr + info->mbre_bit, info->io_base );
4630 return inw( info->io_base );
4631
4632} /* end of usc_InDmaReg() */
4633
4634/*
4635 *
4636 * usc_OutReg()
4637 *
4638 * Write a 16-bit value to a USC serial channel register
4639 *
4640 * Arguments:
4641 *
4642 * info pointer to device info structure
4643 * RegAddr register address (number) to write to
4644 * RegValue 16-bit value to write to register
4645 *
4646 * Return Value:
4647 *
4648 * None
4649 *
4650 */
4651static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4652{
4653 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4654 outw( RegValue, info->io_base + CCAR );
4655
4656 /* Read to flush write to CCAR */
4657 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4658 inw( info->io_base + CCAR );
4659
4660} /* end of usc_OutReg() */
4661
4662/*
4663 * usc_InReg()
4664 *
4665 * Reads a 16-bit value from a USC serial channel register
4666 *
4667 * Arguments:
4668 *
4669 * info pointer to device extension
4670 * RegAddr register address (number) to read from
4671 *
4672 * Return Value:
4673 *
4674 * 16-bit value read from register
4675 */
4676static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4677{
4678 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4679 return inw( info->io_base + CCAR );
4680
4681} /* end of usc_InReg() */
4682
4683/* usc_set_sdlc_mode()
4684 *
4685 * Set up the adapter for SDLC DMA communications.
4686 *
4687 * Arguments: info pointer to device instance data
4688 * Return Value: NONE
4689 */
4690static void usc_set_sdlc_mode( struct mgsl_struct *info )
4691{
4692 u16 RegValue;
4693 int PreSL1660;
4694
4695 /*
4696 * determine if the IUSC on the adapter is pre-SL1660. If
4697 * not, take advantage of the UnderWait feature of more
4698 * modern chips. If an underrun occurs and this bit is set,
4699 * the transmitter will idle the programmed idle pattern
4700 * until the driver has time to service the underrun. Otherwise,
4701 * the dma controller may get the cycles previously requested
4702 * and begin transmitting queued tx data.
4703 */
4704 usc_OutReg(info,TMCR,0x1f);
4705 RegValue=usc_InReg(info,TMDR);
4706 if ( RegValue == IUSC_PRE_SL1660 )
4707 PreSL1660 = 1;
4708 else
4709 PreSL1660 = 0;
4710
4711
4712 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4713 {
4714 /*
4715 ** Channel Mode Register (CMR)
4716 **
4717 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4718 ** <13> 0 0 = Transmit Disabled (initially)
4719 ** <12> 0 1 = Consecutive Idles share common 0
4720 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4721 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4722 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4723 **
4724 ** 1000 1110 0000 0110 = 0x8e06
4725 */
4726 RegValue = 0x8e06;
4727
4728 /*--------------------------------------------------
4729 * ignore user options for UnderRun Actions and
4730 * preambles
4731 *--------------------------------------------------*/
4732 }
4733 else
4734 {
4735 /* Channel mode Register (CMR)
4736 *
4737 * <15..14> 00 Tx Sub modes, Underrun Action
4738 * <13> 0 1 = Send Preamble before opening flag
4739 * <12> 0 1 = Consecutive Idles share common 0
4740 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4741 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4742 * <3..0> 0110 Receiver mode = HDLC/SDLC
4743 *
4744 * 0000 0110 0000 0110 = 0x0606
4745 */
4746 if (info->params.mode == MGSL_MODE_RAW) {
4747 RegValue = 0x0001; /* Set Receive mode = external sync */
4748
4749 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4750 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4751
4752 /*
4753 * TxSubMode:
4754 * CMR <15> 0 Don't send CRC on Tx Underrun
4755 * CMR <14> x undefined
4756 * CMR <13> 0 Send preamble before opening sync
4757 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4758 *
4759 * TxMode:
4760 * CMR <11-8) 0100 MonoSync
4761 *
4762 * 0x00 0100 xxxx xxxx 04xx
4763 */
4764 RegValue |= 0x0400;
4765 }
4766 else {
4767
4768 RegValue = 0x0606;
4769
4770 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4771 RegValue |= BIT14;
4772 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4773 RegValue |= BIT15;
4774 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4775 RegValue |= BIT15 + BIT14;
4776 }
4777
4778 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4779 RegValue |= BIT13;
4780 }
4781
4782 if ( info->params.mode == MGSL_MODE_HDLC &&
4783 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4784 RegValue |= BIT12;
4785
4786 if ( info->params.addr_filter != 0xff )
4787 {
4788 /* set up receive address filtering */
4789 usc_OutReg( info, RSR, info->params.addr_filter );
4790 RegValue |= BIT4;
4791 }
4792
4793 usc_OutReg( info, CMR, RegValue );
4794 info->cmr_value = RegValue;
4795
4796 /* Receiver mode Register (RMR)
4797 *
4798 * <15..13> 000 encoding
4799 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4800 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4801 * <9> 0 1 = Include Receive chars in CRC
4802 * <8> 1 1 = Use Abort/PE bit as abort indicator
4803 * <7..6> 00 Even parity
4804 * <5> 0 parity disabled
4805 * <4..2> 000 Receive Char Length = 8 bits
4806 * <1..0> 00 Disable Receiver
4807 *
4808 * 0000 0101 0000 0000 = 0x0500
4809 */
4810
4811 RegValue = 0x0500;
4812
4813 switch ( info->params.encoding ) {
4814 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4815 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4816 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4817 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4818 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4819 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4820 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4821 }
4822
4823 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4824 RegValue |= BIT9;
4825 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4826 RegValue |= ( BIT12 | BIT10 | BIT9 );
4827
4828 usc_OutReg( info, RMR, RegValue );
4829
4830 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4831 /* When an opening flag of an SDLC frame is recognized the */
4832 /* Receive Character count (RCC) is loaded with the value in */
4833 /* RCLR. The RCC is decremented for each received byte. The */
4834 /* value of RCC is stored after the closing flag of the frame */
4835 /* allowing the frame size to be computed. */
4836
4837 usc_OutReg( info, RCLR, RCLRVALUE );
4838
4839 usc_RCmd( info, RCmd_SelectRicrdma_level );
4840
4841 /* Receive Interrupt Control Register (RICR)
4842 *
4843 * <15..8> ? RxFIFO DMA Request Level
4844 * <7> 0 Exited Hunt IA (Interrupt Arm)
4845 * <6> 0 Idle Received IA
4846 * <5> 0 Break/Abort IA
4847 * <4> 0 Rx Bound IA
4848 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4849 * <2> 0 Abort/PE IA
4850 * <1> 1 Rx Overrun IA
4851 * <0> 0 Select TC0 value for readback
4852 *
4853 * 0000 0000 0000 1010 = 0x000a
4854 */
4855
4856 /* Carry over the Exit Hunt and Idle Received bits */
4857 /* in case they have been armed by usc_ArmEvents. */
4858
4859 RegValue = usc_InReg( info, RICR ) & 0xc0;
4860
4861 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4862 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4863 else
4864 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4865
4866 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4867
4868 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4869 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4870
4871 /* Transmit mode Register (TMR)
4872 *
4873 * <15..13> 000 encoding
4874 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4875 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4876 * <9> 0 1 = Tx CRC Enabled
4877 * <8> 0 1 = Append CRC to end of transmit frame
4878 * <7..6> 00 Transmit parity Even
4879 * <5> 0 Transmit parity Disabled
4880 * <4..2> 000 Tx Char Length = 8 bits
4881 * <1..0> 00 Disable Transmitter
4882 *
4883 * 0000 0100 0000 0000 = 0x0400
4884 */
4885
4886 RegValue = 0x0400;
4887
4888 switch ( info->params.encoding ) {
4889 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4890 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4891 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4892 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4893 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4894 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4895 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4896 }
4897
4898 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4899 RegValue |= BIT9 + BIT8;
4900 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4901 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4902
4903 usc_OutReg( info, TMR, RegValue );
4904
4905 usc_set_txidle( info );
4906
4907
4908 usc_TCmd( info, TCmd_SelectTicrdma_level );
4909
4910 /* Transmit Interrupt Control Register (TICR)
4911 *
4912 * <15..8> ? Transmit FIFO DMA Level
4913 * <7> 0 Present IA (Interrupt Arm)
4914 * <6> 0 Idle Sent IA
4915 * <5> 1 Abort Sent IA
4916 * <4> 1 EOF/EOM Sent IA
4917 * <3> 0 CRC Sent IA
4918 * <2> 1 1 = Wait for SW Trigger to Start Frame
4919 * <1> 1 Tx Underrun IA
4920 * <0> 0 TC0 constant on read back
4921 *
4922 * 0000 0000 0011 0110 = 0x0036
4923 */
4924
4925 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4926 usc_OutReg( info, TICR, 0x0736 );
4927 else
4928 usc_OutReg( info, TICR, 0x1436 );
4929
4930 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4931 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4932
4933 /*
4934 ** Transmit Command/Status Register (TCSR)
4935 **
4936 ** <15..12> 0000 TCmd
4937 ** <11> 0/1 UnderWait
4938 ** <10..08> 000 TxIdle
4939 ** <7> x PreSent
4940 ** <6> x IdleSent
4941 ** <5> x AbortSent
4942 ** <4> x EOF/EOM Sent
4943 ** <3> x CRC Sent
4944 ** <2> x All Sent
4945 ** <1> x TxUnder
4946 ** <0> x TxEmpty
4947 **
4948 ** 0000 0000 0000 0000 = 0x0000
4949 */
4950 info->tcsr_value = 0;
4951
4952 if ( !PreSL1660 )
4953 info->tcsr_value |= TCSR_UNDERWAIT;
4954
4955 usc_OutReg( info, TCSR, info->tcsr_value );
4956
4957 /* Clock mode Control Register (CMCR)
4958 *
4959 * <15..14> 00 counter 1 Source = Disabled
4960 * <13..12> 00 counter 0 Source = Disabled
4961 * <11..10> 11 BRG1 Input is TxC Pin
4962 * <9..8> 11 BRG0 Input is TxC Pin
4963 * <7..6> 01 DPLL Input is BRG1 Output
4964 * <5..3> XXX TxCLK comes from Port 0
4965 * <2..0> XXX RxCLK comes from Port 1
4966 *
4967 * 0000 1111 0111 0111 = 0x0f77
4968 */
4969
4970 RegValue = 0x0f40;
4971
4972 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4973 RegValue |= 0x0003; /* RxCLK from DPLL */
4974 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4975 RegValue |= 0x0004; /* RxCLK from BRG0 */
4976 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4977 RegValue |= 0x0006; /* RxCLK from TXC Input */
4978 else
4979 RegValue |= 0x0007; /* RxCLK from Port1 */
4980
4981 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4982 RegValue |= 0x0018; /* TxCLK from DPLL */
4983 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4984 RegValue |= 0x0020; /* TxCLK from BRG0 */
4985 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4986 RegValue |= 0x0038; /* TxCLK from RxC Input */
4987 else
4988 RegValue |= 0x0030; /* TxCLK from Port0 */
4989
4990 usc_OutReg( info, CMCR, RegValue );
4991
4992
4993 /* Hardware Configuration Register (HCR)
4994 *
4995 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4996 * <13> 0 CTR1DSel:0=CTR0Div determines CTR1Div
4997 * <12> 0 CVOK:0=report code violation in biphase
4998 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4999 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5000 * <7..6> 00 reserved
5001 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5002 * <4> X BRG1 Enable
5003 * <3..2> 00 reserved
5004 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5005 * <0> 0 BRG0 Enable
5006 */
5007
5008 RegValue = 0x0000;
5009
5010 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5011 u32 XtalSpeed;
5012 u32 DpllDivisor;
5013 u16 Tc;
5014
5015 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5016 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5017
5018 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5019 XtalSpeed = 11059200;
5020 else
5021 XtalSpeed = 14745600;
5022
5023 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5024 DpllDivisor = 16;
5025 RegValue |= BIT10;
5026 }
5027 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5028 DpllDivisor = 8;
5029 RegValue |= BIT11;
5030 }
5031 else
5032 DpllDivisor = 32;
5033
5034 /* Tc = (Xtal/Speed) - 1 */
5035 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5036 /* then rounding up gives a more precise time constant. Instead */
5037 /* of rounding up and then subtracting 1 we just don't subtract */
5038 /* the one in this case. */
5039
5040 /*--------------------------------------------------
5041 * ejz: for DPLL mode, application should use the
5042 * same clock speed as the partner system, even
5043 * though clocking is derived from the input RxData.
5044 * In case the user uses a 0 for the clock speed,
5045 * default to 0xffffffff and don't try to divide by
5046 * zero
5047 *--------------------------------------------------*/
5048 if ( info->params.clock_speed )
5049 {
5050 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5051 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5052 / info->params.clock_speed) )
5053 Tc--;
5054 }
5055 else
5056 Tc = -1;
5057
5058
5059 /* Write 16-bit Time Constant for BRG1 */
5060 usc_OutReg( info, TC1R, Tc );
5061
5062 RegValue |= BIT4; /* enable BRG1 */
5063
5064 switch ( info->params.encoding ) {
5065 case HDLC_ENCODING_NRZ:
5066 case HDLC_ENCODING_NRZB:
5067 case HDLC_ENCODING_NRZI_MARK:
5068 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5069 case HDLC_ENCODING_BIPHASE_MARK:
5070 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5071 case HDLC_ENCODING_BIPHASE_LEVEL:
5072 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5073 }
5074 }
5075
5076 usc_OutReg( info, HCR, RegValue );
5077
5078
5079 /* Channel Control/status Register (CCSR)
5080 *
5081 * <15> X RCC FIFO Overflow status (RO)
5082 * <14> X RCC FIFO Not Empty status (RO)
5083 * <13> 0 1 = Clear RCC FIFO (WO)
5084 * <12> X DPLL Sync (RW)
5085 * <11> X DPLL 2 Missed Clocks status (RO)
5086 * <10> X DPLL 1 Missed Clock status (RO)
5087 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5088 * <7> X SDLC Loop On status (RO)
5089 * <6> X SDLC Loop Send status (RO)
5090 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5091 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5092 * <1..0> 00 reserved
5093 *
5094 * 0000 0000 0010 0000 = 0x0020
5095 */
5096
5097 usc_OutReg( info, CCSR, 0x1020 );
5098
5099
5100 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5101 usc_OutReg( info, SICR,
5102 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5103 }
5104
5105
5106 /* enable Master Interrupt Enable bit (MIE) */
5107 usc_EnableMasterIrqBit( info );
5108
5109 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5110 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5111
5112 /* arm RCC underflow interrupt */
5113 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5114 usc_EnableInterrupts(info, MISC);
5115
5116 info->mbre_bit = 0;
5117 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5118 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5119 info->mbre_bit = BIT8;
5120 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5121
5122 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5123 /* Enable DMAEN (Port 7, Bit 14) */
5124 /* This connects the DMA request signal to the ISA bus */
5125 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5126 }
5127
5128 /* DMA Control Register (DCR)
5129 *
5130 * <15..14> 10 Priority mode = Alternating Tx/Rx
5131 * 01 Rx has priority
5132 * 00 Tx has priority
5133 *
5134 * <13> 1 Enable Priority Preempt per DCR<15..14>
5135 * (WARNING DCR<11..10> must be 00 when this is 1)
5136 * 0 Choose activate channel per DCR<11..10>
5137 *
5138 * <12> 0 Little Endian for Array/List
5139 * <11..10> 00 Both Channels can use each bus grant
5140 * <9..6> 0000 reserved
5141 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5142 * <4> 0 1 = drive D/C and S/D pins
5143 * <3> 1 1 = Add one wait state to all DMA cycles.
5144 * <2> 0 1 = Strobe /UAS on every transfer.
5145 * <1..0> 11 Addr incrementing only affects LS24 bits
5146 *
5147 * 0110 0000 0000 1011 = 0x600b
5148 */
5149
5150 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5151 /* PCI adapter does not need DMA wait state */
5152 usc_OutDmaReg( info, DCR, 0xa00b );
5153 }
5154 else
5155 usc_OutDmaReg( info, DCR, 0x800b );
5156
5157
5158 /* Receive DMA mode Register (RDMR)
5159 *
5160 * <15..14> 11 DMA mode = Linked List Buffer mode
5161 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5162 * <12> 1 Clear count of List Entry after fetching
5163 * <11..10> 00 Address mode = Increment
5164 * <9> 1 Terminate Buffer on RxBound
5165 * <8> 0 Bus Width = 16bits
5166 * <7..0> ? status Bits (write as 0s)
5167 *
5168 * 1111 0010 0000 0000 = 0xf200
5169 */
5170
5171 usc_OutDmaReg( info, RDMR, 0xf200 );
5172
5173
5174 /* Transmit DMA mode Register (TDMR)
5175 *
5176 * <15..14> 11 DMA mode = Linked List Buffer mode
5177 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5178 * <12> 1 Clear count of List Entry after fetching
5179 * <11..10> 00 Address mode = Increment
5180 * <9> 1 Terminate Buffer on end of frame
5181 * <8> 0 Bus Width = 16bits
5182 * <7..0> ? status Bits (Read Only so write as 0)
5183 *
5184 * 1111 0010 0000 0000 = 0xf200
5185 */
5186
5187 usc_OutDmaReg( info, TDMR, 0xf200 );
5188
5189
5190 /* DMA Interrupt Control Register (DICR)
5191 *
5192 * <15> 1 DMA Interrupt Enable
5193 * <14> 0 1 = Disable IEO from USC
5194 * <13> 0 1 = Don't provide vector during IntAck
5195 * <12> 1 1 = Include status in Vector
5196 * <10..2> 0 reserved, Must be 0s
5197 * <1> 0 1 = Rx DMA Interrupt Enabled
5198 * <0> 0 1 = Tx DMA Interrupt Enabled
5199 *
5200 * 1001 0000 0000 0000 = 0x9000
5201 */
5202
5203 usc_OutDmaReg( info, DICR, 0x9000 );
5204
5205 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5206 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5207 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5208
5209 /* Channel Control Register (CCR)
5210 *
5211 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5212 * <13> 0 Trigger Tx on SW Command Disabled
5213 * <12> 0 Flag Preamble Disabled
5214 * <11..10> 00 Preamble Length
5215 * <9..8> 00 Preamble Pattern
5216 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5217 * <5> 0 Trigger Rx on SW Command Disabled
5218 * <4..0> 0 reserved
5219 *
5220 * 1000 0000 1000 0000 = 0x8080
5221 */
5222
5223 RegValue = 0x8080;
5224
5225 switch ( info->params.preamble_length ) {
5226 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5227 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5228 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5229 }
5230
5231 switch ( info->params.preamble ) {
5232 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5233 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5234 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5235 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5236 }
5237
5238 usc_OutReg( info, CCR, RegValue );
5239
5240
5241 /*
5242 * Burst/Dwell Control Register
5243 *
5244 * <15..8> 0x20 Maximum number of transfers per bus grant
5245 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5246 */
5247
5248 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5249 /* don't limit bus occupancy on PCI adapter */
5250 usc_OutDmaReg( info, BDCR, 0x0000 );
5251 }
5252 else
5253 usc_OutDmaReg( info, BDCR, 0x2000 );
5254
5255 usc_stop_transmitter(info);
5256 usc_stop_receiver(info);
5257
5258} /* end of usc_set_sdlc_mode() */
5259
5260/* usc_enable_loopback()
5261 *
5262 * Set the 16C32 for internal loopback mode.
5263 * The TxCLK and RxCLK signals are generated from the BRG0 and
5264 * the TxD is looped back to the RxD internally.
5265 *
5266 * Arguments: info pointer to device instance data
5267 * enable 1 = enable loopback, 0 = disable
5268 * Return Value: None
5269 */
5270static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5271{
5272 if (enable) {
5273 /* blank external TXD output */
5274 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5275
5276 /* Clock mode Control Register (CMCR)
5277 *
5278 * <15..14> 00 counter 1 Disabled
5279 * <13..12> 00 counter 0 Disabled
5280 * <11..10> 11 BRG1 Input is TxC Pin
5281 * <9..8> 11 BRG0 Input is TxC Pin
5282 * <7..6> 01 DPLL Input is BRG1 Output
5283 * <5..3> 100 TxCLK comes from BRG0
5284 * <2..0> 100 RxCLK comes from BRG0
5285 *
5286 * 0000 1111 0110 0100 = 0x0f64
5287 */
5288
5289 usc_OutReg( info, CMCR, 0x0f64 );
5290
5291 /* Write 16-bit Time Constant for BRG0 */
5292 /* use clock speed if available, otherwise use 8 for diagnostics */
5293 if (info->params.clock_speed) {
5294 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5295 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5296 else
5297 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5298 } else
5299 usc_OutReg(info, TC0R, (u16)8);
5300
5301 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5302 mode = Continuous. Set Bit 0 to enable BRG0. */
5303 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5304
5305 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5306 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5307
5308 /* set Internal Data loopback mode */
5309 info->loopback_bits = 0x300;
5310 outw( 0x0300, info->io_base + CCAR );
5311 } else {
5312 /* enable external TXD output */
5313 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5314
5315 /* clear Internal Data loopback mode */
5316 info->loopback_bits = 0;
5317 outw( 0,info->io_base + CCAR );
5318 }
5319
5320} /* end of usc_enable_loopback() */
5321
5322/* usc_enable_aux_clock()
5323 *
5324 * Enable the AUX clock output at the specified frequency.
5325 *
5326 * Arguments:
5327 *
5328 * info pointer to device extension
5329 * data_rate data rate of clock in bits per second
5330 * A data rate of 0 disables the AUX clock.
5331 *
5332 * Return Value: None
5333 */
5334static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5335{
5336 u32 XtalSpeed;
5337 u16 Tc;
5338
5339 if ( data_rate ) {
5340 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5341 XtalSpeed = 11059200;
5342 else
5343 XtalSpeed = 14745600;
5344
5345
5346 /* Tc = (Xtal/Speed) - 1 */
5347 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5348 /* then rounding up gives a more precise time constant. Instead */
5349 /* of rounding up and then subtracting 1 we just don't subtract */
5350 /* the one in this case. */
5351
5352
5353 Tc = (u16)(XtalSpeed/data_rate);
5354 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5355 Tc--;
5356
5357 /* Write 16-bit Time Constant for BRG0 */
5358 usc_OutReg( info, TC0R, Tc );
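	/* Illustrative arithmetic for the rounding above, assuming the ISA
	 * crystal of 14745600 Hz:
	 *   data_rate 56000:  quotient 263, remainder 17600; 2*17600 < 56000,
	 *                     so Tc is decremented to 262 (divisor 263).
	 *   data_rate 250000: quotient 58, remainder 245600; 2*245600 >= 250000,
	 *                     so Tc stays 58 (divisor 59, the rounded-up value).
	 */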
5359
5360 /*
5361 * Hardware Configuration Register (HCR)
5362 * Clear Bit 1, BRG0 mode = Continuous
5363 * Set Bit 0 to enable BRG0.
5364 */
5365
5366 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5367
5368 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5369 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5370 } else {
5371 /* data rate == 0 so turn off BRG0 */
5372 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5373 }
5374
5375} /* end of usc_enable_aux_clock() */
5376
5377/*
5378 *
5379 * usc_process_rxoverrun_sync()
5380 *
5381 * This function processes a receive overrun by resetting the
5382 * receive DMA buffers and issuing a Purge Rx FIFO command
5383 * to allow the receiver to continue receiving.
5384 *
5385 * Arguments:
5386 *
5387 * info pointer to device extension
5388 *
5389 * Return Value: None
5390 */
5391static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5392{
5393 int start_index;
5394 int end_index;
5395 int frame_start_index;
5396 int start_of_frame_found = FALSE;
5397 int end_of_frame_found = FALSE;
5398 int reprogram_dma = FALSE;
5399
5400 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5401 u32 phys_addr;
5402
5403 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5404 usc_RCmd( info, RCmd_EnterHuntmode );
5405 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5406
5407 /* CurrentRxBuffer points to the 1st buffer of the next */
5408 /* possibly available receive frame. */
5409
5410 frame_start_index = start_index = end_index = info->current_rx_buffer;
5411
5412 /* Search for an unfinished string of buffers. This means */
5413 /* that a receive frame started (at least one buffer with */
5414 /* count set to zero) but there is no terminating buffer */
5415 /* (status set to non-zero). */
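	/* Illustration only (hypothetical 4 entry list): if entries 0 and 1
	 * have count == 0 but every status field is still 0, a frame started
	 * at entry 0 and was never terminated because of the overrun. The loop
	 * below detects that unfinished string and remembers its start index.
	 */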
5416
5417 while( !buffer_list[end_index].count )
5418 {
5419 /* Count field has been reset to zero by 16C32. */
5420 /* This buffer is currently in use. */
5421
5422 if ( !start_of_frame_found )
5423 {
5424 start_of_frame_found = TRUE;
5425 frame_start_index = end_index;
5426 end_of_frame_found = FALSE;
5427 }
5428
5429 if ( buffer_list[end_index].status )
5430 {
5431 /* Status field has been set by 16C32. */
5432 /* This is the last buffer of a received frame. */
5433
5434 /* We want to leave the buffers for this frame intact. */
5435 /* Move on to next possible frame. */
5436
5437 start_of_frame_found = FALSE;
5438 end_of_frame_found = TRUE;
5439 }
5440
5441 /* advance to next buffer entry in linked list */
5442 end_index++;
5443 if ( end_index == info->rx_buffer_count )
5444 end_index = 0;
5445
5446 if ( start_index == end_index )
5447 {
5448 /* The entire list has been searched with all Counts == 0 and */
5449 /* all Status == 0. The receive buffers are */
5450 /* completely screwed, reset all receive buffers! */
5451 mgsl_reset_rx_dma_buffers( info );
5452 frame_start_index = 0;
5453 start_of_frame_found = FALSE;
5454 reprogram_dma = TRUE;
5455 break;
5456 }
5457 }
5458
5459 if ( start_of_frame_found && !end_of_frame_found )
5460 {
5461 /* There is an unfinished string of receive DMA buffers */
5462 /* as a result of the receiver overrun. */
5463
5464 /* Reset the buffers for the unfinished frame */
5465 /* and reprogram the receive DMA controller to start */
5466 /* at the 1st buffer of unfinished frame. */
5467
5468 start_index = frame_start_index;
5469
5470 do
5471 {
5472 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5473
5474 /* Adjust index for wrap around. */
5475 if ( start_index == info->rx_buffer_count )
5476 start_index = 0;
5477
5478 } while( start_index != end_index );
5479
5480 reprogram_dma = TRUE;
5481 }
5482
5483 if ( reprogram_dma )
5484 {
5485 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5486 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5487 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5488
5489 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5490
5491 /* This empties the receive FIFO and loads the RCC with RCLR */
5492 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5493
5494 /* program 16C32 with physical address of 1st DMA buffer entry */
5495 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5496 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5497 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5498
5499 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5500 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5501 usc_EnableInterrupts( info, RECEIVE_STATUS );
5502
5503 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5504 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5505
5506 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5507 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5508 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5509 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5510 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5511 else
5512 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5513 }
5514 else
5515 {
5516 /* This empties the receive FIFO and loads the RCC with RCLR */
5517 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5518 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5519 }
5520
5521} /* end of usc_process_rxoverrun_sync() */
5522
5523/* usc_stop_receiver()
5524 *
5525 * Disable USC receiver
5526 *
5527 * Arguments: info pointer to device instance data
5528 * Return Value: None
5529 */
5530static void usc_stop_receiver( struct mgsl_struct *info )
5531{
5532 if (debug_level >= DEBUG_LEVEL_ISR)
5533 printk("%s(%d):usc_stop_receiver(%s)\n",
5534 __FILE__,__LINE__, info->device_name );
5535
5536 /* Disable receive DMA channel. */
5537 /* This also disables receive DMA channel interrupts */
5538 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5539
5540 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5541 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5542 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5543
5544 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5545
5546 /* This empties the receive FIFO and loads the RCC with RCLR */
5547 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5548 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5549
5550 info->rx_enabled = 0;
5551 info->rx_overflow = 0;
5552 info->rx_rcc_underrun = 0;
5553
5554} /* end of usc_stop_receiver() */
5555
5556/* usc_start_receiver()
5557 *
5558 * Enable the USC receiver
5559 *
5560 * Arguments: info pointer to device instance data
5561 * Return Value: None
5562 */
5563static void usc_start_receiver( struct mgsl_struct *info )
5564{
5565 u32 phys_addr;
5566
5567 if (debug_level >= DEBUG_LEVEL_ISR)
5568 printk("%s(%d):usc_start_receiver(%s)\n",
5569 __FILE__,__LINE__, info->device_name );
5570
5571 mgsl_reset_rx_dma_buffers( info );
5572 usc_stop_receiver( info );
5573
5574 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5575 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5576
5577 if ( info->params.mode == MGSL_MODE_HDLC ||
5578 info->params.mode == MGSL_MODE_RAW ) {
5579 /* DMA mode Transfers */
5580 /* Program the DMA controller. */
5581 /* Enable the DMA controller end of buffer interrupt. */
5582
5583 /* program 16C32 with physical address of 1st DMA buffer entry */
5584 phys_addr = info->rx_buffer_list[0].phys_entry;
5585 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5586 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5587
5588 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5589 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5590 usc_EnableInterrupts( info, RECEIVE_STATUS );
5591
5592 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5593 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5594
5595 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5596 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5597 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5598 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5599 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5600 else
5601 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5602 } else {
5603 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5604 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5605 usc_EnableInterrupts(info, RECEIVE_DATA);
5606
5607 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5608 usc_RCmd( info, RCmd_EnterHuntmode );
5609
5610 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5611 }
5612
5613 usc_OutReg( info, CCSR, 0x1020 );
5614
5615 info->rx_enabled = 1;
5616
5617} /* end of usc_start_receiver() */
5618
5619/* usc_start_transmitter()
5620 *
5621 * Enable the USC transmitter and send a transmit frame if
5622 * one is loaded in the DMA buffers.
5623 *
5624 * Arguments: info pointer to device instance data
5625 * Return Value: None
5626 */
5627static void usc_start_transmitter( struct mgsl_struct *info )
5628{
5629 u32 phys_addr;
5630 unsigned int FrameSize;
5631
5632 if (debug_level >= DEBUG_LEVEL_ISR)
5633 printk("%s(%d):usc_start_transmitter(%s)\n",
5634 __FILE__,__LINE__, info->device_name );
5635
5636 if ( info->xmit_cnt ) {
5637
5638 /* If auto RTS enabled and RTS is inactive, then assert */
5639 /* RTS and set a flag indicating that the driver should */
5640 /* negate RTS when the transmission completes. */
5641
5642 info->drop_rts_on_tx_done = 0;
5643
5644 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5645 usc_get_serial_signals( info );
5646 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5647 info->serial_signals |= SerialSignal_RTS;
5648 usc_set_serial_signals( info );
5649 info->drop_rts_on_tx_done = 1;
5650 }
5651 }
5652
5653
5654 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5655 if ( !info->tx_active ) {
5656 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5657 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5658 usc_EnableInterrupts(info, TRANSMIT_DATA);
5659 usc_load_txfifo(info);
5660 }
5661 } else {
5662 /* Disable transmit DMA controller while programming. */
5663 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5664
5665 /* Transmit DMA buffer is loaded, so program USC */
5666 /* to send the frame contained in the buffers. */
5667
5668 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5669
5670 /* if operating in Raw sync mode, reset the rcc component
5671 * of the tx dma buffer entry, otherwise, the serial controller
5672 * will send a closing sync char after this count.
5673 */
5674 if ( info->params.mode == MGSL_MODE_RAW )
5675 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5676
5677 /* Program the Transmit Character Length Register (TCLR) */
5678 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5679 usc_OutReg( info, TCLR, (u16)FrameSize );
5680
5681 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5682
5683 /* Program the address of the 1st DMA Buffer Entry in linked list */
5684 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5685 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5686 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5687
5688 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5689 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5690 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5691
5692 if ( info->params.mode == MGSL_MODE_RAW &&
5693 info->num_tx_dma_buffers > 1 ) {
5694 /* When running external sync mode, attempt to 'stream' transmit */
5695 /* by filling tx dma buffers as they become available. To do this */
5696 /* we need to enable Tx DMA EOB Status interrupts : */
5697 /* */
5698 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5699 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5700
5701 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5702 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5703 }
5704
5705 /* Initialize Transmit DMA Channel */
5706 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5707
5708 usc_TCmd( info, TCmd_SendFrame );
5709
5710 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5711 add_timer(&info->tx_timer);
5712 }
5713 info->tx_active = 1;
5714 }
5715
5716 if ( !info->tx_enabled ) {
5717 info->tx_enabled = 1;
5718 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5719 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5720 else
5721 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5722 }
5723
5724} /* end of usc_start_transmitter() */
5725
5726/* usc_stop_transmitter()
5727 *
5728 * Stops the transmitter and DMA
5729 *
5730 * Arguments: info pointer to device instance data
5731 * Return Value: None
5732 */
5733static void usc_stop_transmitter( struct mgsl_struct *info )
5734{
5735 if (debug_level >= DEBUG_LEVEL_ISR)
5736 printk("%s(%d):usc_stop_transmitter(%s)\n",
5737 __FILE__,__LINE__, info->device_name );
5738
5739 del_timer(&info->tx_timer);
5740
5741 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5742 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5743 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5744
5745 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5746 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5747 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5748
5749 info->tx_enabled = 0;
5750 info->tx_active = 0;
5751
5752} /* end of usc_stop_transmitter() */
5753
5754/* usc_load_txfifo()
5755 *
5756 * Fill the transmit FIFO until the FIFO is full or
5757 * there is no more data to load.
5758 *
5759 * Arguments: info pointer to device extension (instance data)
5760 * Return Value: None
5761 */
5762static void usc_load_txfifo( struct mgsl_struct *info )
5763{
5764 int Fifocount;
5765 u8 TwoBytes[2];
5766
5767 if ( !info->xmit_cnt && !info->x_char )
5768 return;
5769
5770 /* Select transmit FIFO status readback in TICR */
5771 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5772
5773 /* load the Transmit FIFO until FIFOs full or all data sent */
5774
5775 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5776 /* there is more space in the transmit FIFO and */
5777 /* there is more data in transmit buffer */
5778
5779 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5780 /* write a 16-bit word from transmit buffer to 16C32 */
5781
5782 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5783 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5784 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5785 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5786
5787 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5788
5789 info->xmit_cnt -= 2;
5790 info->icount.tx += 2;
5791 } else {
5792 /* only 1 byte left to transmit or 1 FIFO slot left */
5793
5794 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5795 info->io_base + CCAR );
5796
5797 if (info->x_char) {
5798 /* transmit pending high priority char */
5799 outw( info->x_char,info->io_base + CCAR );
5800 info->x_char = 0;
5801 } else {
5802 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5803 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5804 info->xmit_cnt--;
5805 }
5806 info->icount.tx++;
5807 }
5808 }
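	/* Note (illustrative): SERIAL_XMIT_SIZE is a power of two (typically
	 * 4096), so the "& (SERIAL_XMIT_SIZE-1)" above is a cheap modulo that
	 * wraps the circular buffer index, e.g. tail 4095 + 1 = 4096 and
	 * 4096 & 4095 == 0.
	 */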
5809
5810} /* end of usc_load_txfifo() */
5811
5812/* usc_reset()
5813 *
5814 * Reset the adapter to a known state and prepare it for further use.
5815 *
5816 * Arguments: info pointer to device instance data
5817 * Return Value: None
5818 */
5819static void usc_reset( struct mgsl_struct *info )
5820{
5821 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5822 int i;
5823 u32 readval;
5824
5825 /* Set BIT30 of Misc Control Register */
5826 /* (Local Control Register 0x50) to force reset of USC. */
5827
5828 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5829 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5830
5831 info->misc_ctrl_value |= BIT30;
5832 *MiscCtrl = info->misc_ctrl_value;
5833
5834 /*
5835 * Force at least 170ns delay before clearing
5836 * reset bit. Each read from LCR takes at least
5837 * 30ns, so 10 reads give at least 300ns to be safe.
5838 */
5839 for(i=0;i<10;i++)
5840 readval = *MiscCtrl;
5841
5842 info->misc_ctrl_value &= ~BIT30;
5843 *MiscCtrl = info->misc_ctrl_value;
5844
5845 *LCR0BRDR = BUS_DESCRIPTOR(
5846 1, // Write Strobe Hold (0-3)
5847 2, // Write Strobe Delay (0-3)
5848 2, // Read Strobe Delay (0-3)
5849 0, // NWDD (Write data-data) (0-3)
5850 4, // NWAD (Write Addr-data) (0-31)
5851 0, // NXDA (Read/Write Data-Addr) (0-3)
5852 0, // NRDD (Read Data-Data) (0-3)
5853 5 // NRAD (Read Addr-Data) (0-31)
5854 );
5855 } else {
5856 /* do HW reset */
5857 outb( 0,info->io_base + 8 );
5858 }
5859
5860 info->mbre_bit = 0;
5861 info->loopback_bits = 0;
5862 info->usc_idle_mode = 0;
5863
5864 /*
5865 * Program the Bus Configuration Register (BCR)
5866 *
5867 * <15> 0 Don't use separate address
5868 * <14..6> 0 reserved
5869 * <5..4> 00 IAckmode = Default, don't care
5870 * <3> 1 Bus Request Totem Pole output
5871 * <2> 1 Use 16 Bit data bus
5872 * <1> 0 IRQ Totem Pole output
5873 * <0> 0 Don't Shift Right Addr
5874 *
5875 * 0000 0000 0000 1100 = 0x000c
5876 *
5877 * By writing to io_base + SDPIN the Wait/Ack pin is
5878 * programmed to work as a Wait pin.
5879 */
5880
5881 outw( 0x000c,info->io_base + SDPIN );
5882
5883
5884 outw( 0,info->io_base );
5885 outw( 0,info->io_base + CCAR );
5886
5887 /* select little endian byte ordering */
5888 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5889
5890
5891 /* Port Control Register (PCR)
5892 *
5893 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5894 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5895 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5896 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5897 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5898 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5899 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5900 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5901 *
5902 * 1111 0000 1111 0101 = 0xf0f5
5903 */
5904
5905 usc_OutReg( info, PCR, 0xf0f5 );
5906
5907
5908 /*
5909 * Input/Output Control Register
5910 *
5911 * <15..14> 00 CTS is active low input
5912 * <13..12> 00 DCD is active low input
5913 * <11..10> 00 TxREQ pin is input (DSR)
5914 * <9..8> 00 RxREQ pin is input (RI)
5915 * <7..6> 00 TxD is output (Transmit Data)
5916 * <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
5917 * <2..0> 100 RxC is Output (drive with BRG0)
5918 *
5919 * 0000 0000 0000 0100 = 0x0004
5920 */
5921
5922 usc_OutReg( info, IOCR, 0x0004 );
5923
5924} /* end of usc_reset() */
5925
5926/* usc_set_async_mode()
5927 *
5928 * Program adapter for asynchronous communications.
5929 *
5930 * Arguments: info pointer to device instance data
5931 * Return Value: None
5932 */
5933static void usc_set_async_mode( struct mgsl_struct *info )
5934{
5935 u16 RegValue;
5936
5937 /* disable interrupts while programming USC */
5938 usc_DisableMasterIrqBit( info );
5939
5940 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5941 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5942
5943 usc_loopback_frame( info );
5944
5945 /* Channel mode Register (CMR)
5946 *
5947 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5948 * <13..12> 00 00 = 16X Clock
5949 * <11..8> 0000 Transmitter mode = Asynchronous
5950 * <7..6> 00 reserved?
5951 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5952 * <3..0> 0000 Receiver mode = Asynchronous
5953 *
5954 * 0000 0000 0000 0000 = 0x0
5955 */
5956
5957 RegValue = 0;
5958 if ( info->params.stop_bits != 1 )
5959 RegValue |= BIT14;
5960 usc_OutReg( info, CMR, RegValue );
5961
5962
5963 /* Receiver mode Register (RMR)
5964 *
5965 * <15..13> 000 encoding = None
5966 * <12..08> 00000 reserved (Sync Only)
5967 * <7..6> 00 Even parity
5968 * <5> 0 parity disabled
5969 * <4..2> 000 Receive Char Length = 8 bits
5970 * <1..0> 00 Disable Receiver
5971 *
5972 * 0000 0000 0000 0000 = 0x0
5973 */
5974
5975 RegValue = 0;
5976
5977 if ( info->params.data_bits != 8 )
5978 RegValue |= BIT4+BIT3+BIT2;
5979
5980 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5981 RegValue |= BIT5;
5982 if ( info->params.parity != ASYNC_PARITY_ODD )
5983 RegValue |= BIT6;
5984 }
5985
5986 usc_OutReg( info, RMR, RegValue );
5987
5988
5989 /* Set IRQ trigger level */
5990
5991 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5992
5993
5994 /* Receive Interrupt Control Register (RICR)
5995 *
5996 * <15..8> ? RxFIFO IRQ Request Level
5997 *
5998 * Note: For async mode the receive FIFO level must be set
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08005999 * to 0 to avoid the situation where the FIFO contains fewer bytes
Linus Torvalds1da177e2005-04-16 15:20:36 -07006000 * than the trigger level and no more data is expected.
6001 *
6002 * <7> 0 Exited Hunt IA (Interrupt Arm)
6003 * <6> 0 Idle Received IA
6004 * <5> 0 Break/Abort IA
6005 * <4> 0 Rx Bound IA
6006 * <3> 0 Queued status reflects oldest byte in FIFO
6007 * <2> 0 Abort/PE IA
6008 * <1> 0 Rx Overrun IA
6009 * <0> 0 Select TC0 value for readback
6010 *
6011 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6012 */
6013
6014 usc_OutReg( info, RICR, 0x0000 );
6015
6016 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6017 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6018
6019
6020 /* Transmit mode Register (TMR)
6021 *
6022 * <15..13> 000 encoding = None
6023 * <12..08> 00000 reserved (Sync Only)
6024 * <7..6> 00 Transmit parity Even
6025 * <5> 0 Transmit parity Disabled
6026 * <4..2> 000 Tx Char Length = 8 bits
6027 * <1..0> 00 Disable Transmitter
6028 *
6029 * 0000 0000 0000 0000 = 0x0
6030 */
6031
6032 RegValue = 0;
6033
6034 if ( info->params.data_bits != 8 )
6035 RegValue |= BIT4+BIT3+BIT2;
6036
6037 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6038 RegValue |= BIT5;
6039 if ( info->params.parity != ASYNC_PARITY_ODD )
6040 RegValue |= BIT6;
6041 }
6042
6043 usc_OutReg( info, TMR, RegValue );
6044
6045 usc_set_txidle( info );
6046
6047
6048 /* Set IRQ trigger level */
6049
6050 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6051
6052
6053 /* Transmit Interrupt Control Register (TICR)
6054 *
6055 * <15..8> ? Transmit FIFO IRQ Level
6056 * <7> 0 Present IA (Interrupt Arm)
6057 * <6> 1 Idle Sent IA
6058 * <5> 0 Abort Sent IA
6059 * <4> 0 EOF/EOM Sent IA
6060 * <3> 0 CRC Sent IA
6061 * <2> 0 1 = Wait for SW Trigger to Start Frame
6062 * <1> 0 Tx Underrun IA
6063 * <0> 0 TC0 constant on read back
6064 *
6065 * 0000 0000 0100 0000 = 0x0040
6066 */
6067
6068 usc_OutReg( info, TICR, 0x1f40 );
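	/* The value written above decomposes per the table: high byte 0x1f is
	 * the Transmit FIFO IRQ request level (<15..8>) and low byte 0x40 arms
	 * the Idle Sent interrupt (<6>), i.e. 0x1f40 == (0x1f << 8) | 0x0040.
	 */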
6069
6070 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6071 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6072
6073 usc_enable_async_clock( info, info->params.data_rate );
6074
6075
6076 /* Channel Control/status Register (CCSR)
6077 *
6078 * <15> X RCC FIFO Overflow status (RO)
6079 * <14> X RCC FIFO Not Empty status (RO)
6080 * <13> 0 1 = Clear RCC FIFO (WO)
6081 * <12> X DPLL in Sync status (RO)
6082 * <11> X DPLL 2 Missed Clocks status (RO)
6083 * <10> X DPLL 1 Missed Clock status (RO)
6084 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6085 * <7> X SDLC Loop On status (RO)
6086 * <6> X SDLC Loop Send status (RO)
6087 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6088 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6089 * <1..0> 00 reserved
6090 *
6091 * 0000 0000 0010 0000 = 0x0020
6092 */
6093
6094 usc_OutReg( info, CCSR, 0x0020 );
6095
6096 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6097 RECEIVE_DATA + RECEIVE_STATUS );
6098
6099 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6100 RECEIVE_DATA + RECEIVE_STATUS );
6101
6102 usc_EnableMasterIrqBit( info );
6103
6104 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6105 /* Enable INTEN (Port 6, Bit12) */
6106 /* This connects the IRQ request signal to the ISA bus */
6107 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6108 }
6109
Paul Fulghum7c1fff52005-09-09 13:02:14 -07006110 if (info->params.loopback) {
6111 info->loopback_bits = 0x300;
6112 outw(0x0300, info->io_base + CCAR);
6113 }
6114
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115} /* end of usc_set_async_mode() */
6116
6117/* usc_loopback_frame()
6118 *
6119 * Loop back a small (2 byte) dummy SDLC frame.
6120 * Interrupts and DMA are NOT used. The purpose of this is to
6121 * clear any 'stale' status info left over from running in async mode.
6122 *
6123 * The 16C32 shows the strange behaviour of marking the 1st
6124 * received SDLC frame with a CRC error even when there is no
6125 * CRC error. To get around this, a small dummy frame of 2 bytes
6126 * is looped back when switching from async to sync mode.
6127 *
6128 * Arguments: info pointer to device instance data
6129 * Return Value: None
6130 */
6131static void usc_loopback_frame( struct mgsl_struct *info )
6132{
6133 int i;
6134 unsigned long oldmode = info->params.mode;
6135
6136 info->params.mode = MGSL_MODE_HDLC;
6137
6138 usc_DisableMasterIrqBit( info );
6139
6140 usc_set_sdlc_mode( info );
6141 usc_enable_loopback( info, 1 );
6142
6143 /* Write 16-bit Time Constant for BRG0 */
6144 usc_OutReg( info, TC0R, 0 );
6145
6146 /* Channel Control Register (CCR)
6147 *
6148 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6149 * <13> 0 Trigger Tx on SW Command Disabled
6150 * <12> 0 Flag Preamble Disabled
6151 * <11..10> 00 Preamble Length = 8-Bits
6152 * <9..8> 01 Preamble Pattern = flags
6153 * <7..6> 00 Don't use 32-bit Rx status Blocks (RSBs)
6154 * <5> 0 Trigger Rx on SW Command Disabled
6155 * <4..0> 0 reserved
6156 *
6157 * 0000 0001 0000 0000 = 0x0100
6158 */
6159
6160 usc_OutReg( info, CCR, 0x0100 );
6161
6162 /* SETUP RECEIVER */
6163 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6164 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6165
6166 /* SETUP TRANSMITTER */
6167 /* Program the Transmit Character Length Register (TCLR) */
6168 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6169 usc_OutReg( info, TCLR, 2 );
6170 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6171
6172 /* unlatch Tx status bits, and start transmit channel. */
6173 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6174 outw(0,info->io_base + DATAREG);
6175
6176 /* ENABLE TRANSMITTER */
6177 usc_TCmd( info, TCmd_SendFrame );
6178 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6179
6180 /* WAIT FOR RECEIVE COMPLETE */
6181 for (i=0 ; i<1000 ; i++)
6182 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6183 break;
6184
6185 /* clear Internal Data loopback mode */
6186 usc_enable_loopback(info, 0);
6187
6188 usc_EnableMasterIrqBit(info);
6189
6190 info->params.mode = oldmode;
6191
6192} /* end of usc_loopback_frame() */
6193
6194/* usc_set_sync_mode() Programs the USC for SDLC communications.
6195 *
6196 * Arguments: info pointer to adapter info structure
6197 * Return Value: None
6198 */
6199static void usc_set_sync_mode( struct mgsl_struct *info )
6200{
6201 usc_loopback_frame( info );
6202 usc_set_sdlc_mode( info );
6203
6204 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6205 /* Enable INTEN (Port 6, Bit12) */
6206 /* This connects the IRQ request signal to the ISA bus */
6207 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6208 }
6209
6210 usc_enable_aux_clock(info, info->params.clock_speed);
6211
6212 if (info->params.loopback)
6213 usc_enable_loopback(info,1);
6214
6215} /* end of usc_set_sync_mode() */
6216
6217/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6218 *
6219 * Arguments: info pointer to device instance data
6220 * Return Value: None
6221 */
6222static void usc_set_txidle( struct mgsl_struct *info )
6223{
6224 u16 usc_idle_mode = IDLEMODE_FLAGS;
6225
6226 /* Map API idle mode to USC register bits */
6227
6228 switch( info->idle_mode ){
6229 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6230 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6231 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6232 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6233 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6234 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6235 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6236 }
6237
6238 info->usc_idle_mode = usc_idle_mode;
6239 //usc_OutReg(info, TCSR, usc_idle_mode);
6240 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6241 info->tcsr_value += usc_idle_mode;
6242 usc_OutReg(info, TCSR, info->tcsr_value);
6243
6244 /*
6245 * if SyncLink WAN adapter is running in external sync mode, the
6246 * transmitter has been set to Monosync in order to try to mimic
6247 * a true raw outbound bit stream. Monosync still sends an open/close
6248 * sync char at the start/end of a frame. Try to match those sync
6249 * patterns to the idle mode set here
6250 */
6251 if ( info->params.mode == MGSL_MODE_RAW ) {
6252 unsigned char syncpat = 0;
6253 switch( info->idle_mode ) {
6254 case HDLC_TXIDLE_FLAGS:
6255 syncpat = 0x7e;
6256 break;
6257 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6258 syncpat = 0x55;
6259 break;
6260 case HDLC_TXIDLE_ZEROS:
6261 case HDLC_TXIDLE_SPACE:
6262 syncpat = 0x00;
6263 break;
6264 case HDLC_TXIDLE_ONES:
6265 case HDLC_TXIDLE_MARK:
6266 syncpat = 0xff;
6267 break;
6268 case HDLC_TXIDLE_ALT_MARK_SPACE:
6269 syncpat = 0xaa;
6270 break;
6271 }
6272
6273 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6274 }
6275
6276} /* end of usc_set_txidle() */
6277
6278/* usc_get_serial_signals()
6279 *
6280 * Query the adapter for the state of the V24 status (input) signals.
6281 *
6282 * Arguments: info pointer to device instance data
6283 * Return Value: None
6284 */
6285static void usc_get_serial_signals( struct mgsl_struct *info )
6286{
6287 u16 status;
6288
6289 /* clear all serial signals except DTR and RTS */
6290 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6291
6292 /* Read the Misc Interrupt status Register (MISR) to get */
6293 /* the V24 status signals. */
6294
6295 status = usc_InReg( info, MISR );
6296
6297 /* set serial signal bits to reflect MISR */
6298
6299 if ( status & MISCSTATUS_CTS )
6300 info->serial_signals |= SerialSignal_CTS;
6301
6302 if ( status & MISCSTATUS_DCD )
6303 info->serial_signals |= SerialSignal_DCD;
6304
6305 if ( status & MISCSTATUS_RI )
6306 info->serial_signals |= SerialSignal_RI;
6307
6308 if ( status & MISCSTATUS_DSR )
6309 info->serial_signals |= SerialSignal_DSR;
6310
6311} /* end of usc_get_serial_signals() */
6312
6313/* usc_set_serial_signals()
6314 *
6315 * Set the state of DTR and RTS based on contents of
6316 * serial_signals member of device extension.
6317 *
6318 * Arguments: info pointer to device instance data
6319 * Return Value: None
6320 */
6321static void usc_set_serial_signals( struct mgsl_struct *info )
6322{
6323 u16 Control;
6324 unsigned char V24Out = info->serial_signals;
6325
6326 /* get the current value of the Port Control Register (PCR) */
6327
6328 Control = usc_InReg( info, PCR );
6329
6330 if ( V24Out & SerialSignal_RTS )
6331 Control &= ~(BIT6);
6332 else
6333 Control |= BIT6;
6334
6335 if ( V24Out & SerialSignal_DTR )
6336 Control &= ~(BIT4);
6337 else
6338 Control |= BIT4;
6339
6340 usc_OutReg( info, PCR, Control );
6341
6342} /* end of usc_set_serial_signals() */
6343
6344/* usc_enable_async_clock()
6345 *
6346 * Enable the async clock at the specified frequency.
6347 *
6348 * Arguments: info pointer to device instance data
6349 * data_rate data rate of clock in bps
6350 * 0 disables the AUX clock.
6351 * Return Value: None
6352 */
6353static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6354{
6355 if ( data_rate ) {
6356 /*
6357 * Clock mode Control Register (CMCR)
6358 *
6359 * <15..14> 00 counter 1 Disabled
6360 * <13..12> 00 counter 0 Disabled
6361 * <11..10> 11 BRG1 Input is TxC Pin
6362 * <9..8> 11 BRG0 Input is TxC Pin
6363 * <7..6> 01 DPLL Input is BRG1 Output
6364 * <5..3> 100 TxCLK comes from BRG0
6365 * <2..0> 100 RxCLK comes from BRG0
6366 *
6367 * 0000 1111 0110 0100 = 0x0f64
6368 */
6369
6370 usc_OutReg( info, CMCR, 0x0f64 );
6371
6372
6373 /*
6374 * Write 16-bit Time Constant for BRG0
6375 * Time Constant = (ClkSpeed / data_rate) - 1
6376 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6377 */
6378
6379 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6380 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6381 else
6382 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
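	/* Worked example (illustrative): in 16X async clock mode BRG0 runs at
	 * 16 times the data rate, so the divisor base is 14745600/16 = 921600
	 * (ISA) or 11059200/16 = 691200 (PCI); a 9600 bps port therefore gets
	 * TC0R = 921600/9600 - 1 = 95 on ISA, or 691200/9600 - 1 = 71 on PCI.
	 */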
6383
6384
6385 /*
6386 * Hardware Configuration Register (HCR)
6387 * Clear Bit 1, BRG0 mode = Continuous
6388 * Set Bit 0 to enable BRG0.
6389 */
6390
6391 usc_OutReg( info, HCR,
6392 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6393
6394
6395 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6396
6397 usc_OutReg( info, IOCR,
6398 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6399 } else {
6400 /* data rate == 0 so turn off BRG0 */
6401 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6402 }
6403
6404} /* end of usc_enable_async_clock() */
6405
6406/*
6407 * Buffer Structures:
6408 *
6409 * Normal memory access uses virtual addresses that can make discontiguous
6410 * physical memory pages appear to be contiguous in the virtual address
6411 * space (the processor's memory mapping handles the conversions).
6412 *
6413 * DMA transfers require physically contiguous memory. This is because
6414 * the DMA system controller and DMA bus masters deal with memory using
6415 * only physical addresses.
6416 *
6417 * This causes a problem under Windows NT when large DMA buffers are
6418 * needed. Fragmentation of the nonpaged pool prevents allocations of
6419 * physically contiguous buffers larger than PAGE_SIZE.
6420 *
6421 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6422 * allows DMA transfers to physically discontiguous buffers. Information
6423 * about each data transfer buffer is contained in a memory structure
6424 * called a 'buffer entry'. A list of buffer entries is maintained
6425 * to track and control the use of the data transfer buffers.
6426 *
6427 * To support this strategy we will allocate sufficient PAGE_SIZE
6428 * contiguous memory buffers to allow for the total required buffer
6429 * space.
6430 *
6431 * The 16C32 accesses the list of buffer entries using Bus Master
6432 * DMA. Control information is read from the buffer entries by the
6433 * 16C32 to control data transfers. status information is written to
6434 * the buffer entries by the 16C32 to indicate the status of completed
6435 * transfers.
6436 *
6437 * The CPU writes control information to the buffer entries to control
6438 * the 16C32 and reads status information from the buffer entries to
6439 * determine information about received and transmitted frames.
6440 *
6441 * Because the CPU and 16C32 (adapter) both need simultaneous access
6442 * to the buffer entries, the buffer entry memory is allocated with
6443 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6444 * entry list to PAGE_SIZE.
6445 *
6446 * The actual data buffers on the other hand will only be accessed
6447 * by the CPU or the adapter but not by both simultaneously. This allows
6448 * Scatter/Gather packet based DMA procedures for using physically
6449 * discontiguous pages.
6450 */
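/*
 * Illustrative sketch only -- the authoritative DMABUFFERENTRY definition
 * appears earlier in this file; the names and layout below are assumptions.
 * The routines that follow rely on roughly these fields, with count and
 * status apparently adjacent 16-bit members (the unsigned long stores over
 * .count in the reset routines set count and clear status in one write):
 *
 *	struct example_dma_buffer_entry {
 *		u16 count;       // buffer size; cleared by the 16C32 while in use
 *		u16 status;      // completion status written by the 16C32
 *		u16 rcc;         // residual character count of a completed frame
 *		u32 phys_entry;  // physical address of this list entry
 *		char *virt_addr; // CPU (virtual) address of the data buffer
 *	};
 */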
6451
6452/*
6453 * mgsl_reset_tx_dma_buffers()
6454 *
6455 * Set the count for all transmit buffers to 0 to indicate the
6456 * buffer is available for use and set the current buffer to the
6457 * first buffer. This effectively makes all buffers free and
6458 * discards any data in buffers.
6459 *
6460 * Arguments: info pointer to device instance data
6461 * Return Value: None
6462 */
6463static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6464{
6465 unsigned int i;
6466
6467 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6468 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6469 }
6470
6471 info->current_tx_buffer = 0;
6472 info->start_tx_dma_buffer = 0;
6473 info->tx_dma_buffers_used = 0;
6474
6475 info->get_tx_holding_index = 0;
6476 info->put_tx_holding_index = 0;
6477 info->tx_holding_count = 0;
6478
6479} /* end of mgsl_reset_tx_dma_buffers() */
6480
6481/*
6482 * num_free_tx_dma_buffers()
6483 *
6484 * returns the number of free tx dma buffers available
6485 *
6486 * Arguments: info pointer to device instance data
6487 * Return Value: number of free tx dma buffers
6488 */
6489static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6490{
6491 return info->tx_buffer_count - info->tx_dma_buffers_used;
6492}
6493
6494/*
6495 * mgsl_reset_rx_dma_buffers()
6496 *
6497 * Set the count for all receive buffers to DMABUFFERSIZE
6498 * and set the current buffer to the first buffer. This effectively
6499 * makes all buffers free and discards any data in buffers.
6500 *
6501 * Arguments: info pointer to device instance data
6502 * Return Value: None
6503 */
6504static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6505{
6506 unsigned int i;
6507
6508 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6509 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6510// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6511// info->rx_buffer_list[i].status = 0;
6512 }
6513
6514 info->current_rx_buffer = 0;
6515
6516} /* end of mgsl_reset_rx_dma_buffers() */
6517
6518/*
6519 * mgsl_free_rx_frame_buffers()
6520 *
6521 * Free the receive buffers used by a received SDLC
6522 * frame such that the buffers can be reused.
6523 *
6524 * Arguments:
6525 *
6526 * info pointer to device instance data
6527 * StartIndex index of 1st receive buffer of frame
6528 * EndIndex index of last receive buffer of frame
6529 *
6530 * Return Value: None
6531 */
6532static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6533{
6534 int Done = 0;
6535 DMABUFFERENTRY *pBufEntry;
6536 unsigned int Index;
6537
6538 /* Starting with 1st buffer entry of the frame clear the status */
6539 /* field and set the count field to DMA Buffer Size. */
6540
6541 Index = StartIndex;
6542
6543 while( !Done ) {
6544 pBufEntry = &(info->rx_buffer_list[Index]);
6545
6546 if ( Index == EndIndex ) {
6547 /* This is the last buffer of the frame! */
6548 Done = 1;
6549 }
6550
6551 /* reset current buffer for reuse */
6552// pBufEntry->status = 0;
6553// pBufEntry->count = DMABUFFERSIZE;
6554 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6555
6556 /* advance to next buffer entry in linked list */
6557 Index++;
6558 if ( Index == info->rx_buffer_count )
6559 Index = 0;
6560 }
6561
6562 /* set current buffer to next buffer after last buffer of frame */
6563 info->current_rx_buffer = Index;
6564
6565} /* end of mgsl_free_rx_frame_buffers() */
6566
6567/* mgsl_get_rx_frame()
6568 *
6569 * This function attempts to return a received SDLC frame from the
6570 * receive DMA buffers. Only frames received without errors are returned.
6571 *
6572 * Arguments: info pointer to device extension
6573 * Return Value: 1 if frame returned, otherwise 0
6574 */
6575static int mgsl_get_rx_frame(struct mgsl_struct *info)
6576{
6577 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6578 unsigned short status;
6579 DMABUFFERENTRY *pBufEntry;
6580 unsigned int framesize = 0;
6581 int ReturnCode = 0;
6582 unsigned long flags;
6583 struct tty_struct *tty = info->tty;
6584 int return_frame = 0;
6585
6586 /*
6587 * current_rx_buffer points to the 1st buffer of the next available
6588 * receive frame. To find the last buffer of the frame look for
6589 * a non-zero status field in the buffer entries. (The status
6590 * field is set by the 16C32 after completing a receive frame.
6591 */
6592
6593 StartIndex = EndIndex = info->current_rx_buffer;
6594
6595 while( !info->rx_buffer_list[EndIndex].status ) {
6596 /*
6597 * If the count field of the buffer entry is non-zero then
6598 * this buffer has not been used. (The 16C32 clears the count
6599 * field when it starts using the buffer.) If an unused buffer
6600 * is encountered then there are no frames available.
6601 */
6602
6603 if ( info->rx_buffer_list[EndIndex].count )
6604 goto Cleanup;
6605
6606 /* advance to next buffer entry in linked list */
6607 EndIndex++;
6608 if ( EndIndex == info->rx_buffer_count )
6609 EndIndex = 0;
6610
6611 /* if entire list searched then no frame available */
6612 if ( EndIndex == StartIndex ) {
6613 /* If this occurs then something bad happened,
6614 * all buffers have been 'used' but none mark
6615 * the end of a frame. Reset buffers and receiver.
6616 */
6617
6618 if ( info->rx_enabled ){
6619 spin_lock_irqsave(&info->irq_spinlock,flags);
6620 usc_start_receiver(info);
6621 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6622 }
6623 goto Cleanup;
6624 }
6625 }
6626
6627
6628 /* check status of receive frame */
6629
6630 status = info->rx_buffer_list[EndIndex].status;
6631
6632 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6633 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6634 if ( status & RXSTATUS_SHORT_FRAME )
6635 info->icount.rxshort++;
6636 else if ( status & RXSTATUS_ABORT )
6637 info->icount.rxabort++;
6638 else if ( status & RXSTATUS_OVERRUN )
6639 info->icount.rxover++;
6640 else {
6641 info->icount.rxcrc++;
6642 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6643 return_frame = 1;
6644 }
6645 framesize = 0;
6646#ifdef CONFIG_HDLC
6647 {
6648 struct net_device_stats *stats = hdlc_stats(info->netdev);
6649 stats->rx_errors++;
6650 stats->rx_frame_errors++;
6651 }
6652#endif
6653 } else
6654 return_frame = 1;
6655
6656 if ( return_frame ) {
6657 /* receive frame has no errors, get frame size.
6658 * The frame size is the starting value of the RCC (which was
6659 * set to 0xffff) minus the ending value of the RCC (decremented
6660 * once for each receive character) minus 2 or 4 for the 16-bit or 32-bit CRC.
6661 */
6662
6663 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6664
6665 /* adjust frame size for CRC if any */
6666 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6667 framesize -= 2;
6668 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6669 framesize -= 4;
6670 }
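	/* Example (illustrative): with RCLRVALUE == 0xffff, a final RCC of
	 * 0xff35 gives framesize = 0xffff - 0xff35 = 202 bytes; subtracting
	 * the 2 CRC bytes for HDLC_CRC_16_CCITT leaves 200 data bytes.
	 */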
6671
6672 if ( debug_level >= DEBUG_LEVEL_BH )
6673 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6674 __FILE__,__LINE__,info->device_name,status,framesize);
6675
6676 if ( debug_level >= DEBUG_LEVEL_DATA )
6677 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6678 min_t(int, framesize, DMABUFFERSIZE),0);
6679
6680 if (framesize) {
6681 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6682 ((framesize+1) > info->max_frame_size) ) ||
6683 (framesize > info->max_frame_size) )
6684 info->icount.rxlong++;
6685 else {
6686 /* copy dma buffer(s) to contiguous intermediate buffer */
6687 int copy_count = framesize;
6688 int index = StartIndex;
6689 unsigned char *ptmp = info->intermediate_rxbuffer;
6690
6691 if ( !(status & RXSTATUS_CRC_ERROR))
6692 info->icount.rxok++;
6693
6694 while(copy_count) {
6695 int partial_count;
6696 if ( copy_count > DMABUFFERSIZE )
6697 partial_count = DMABUFFERSIZE;
6698 else
6699 partial_count = copy_count;
6700
6701 pBufEntry = &(info->rx_buffer_list[index]);
6702 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6703 ptmp += partial_count;
6704 copy_count -= partial_count;
6705
6706 if ( ++index == info->rx_buffer_count )
6707 index = 0;
6708 }
6709
6710 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6711 ++framesize;
6712 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6713 RX_CRC_ERROR :
6714 RX_OK);
6715
6716 if ( debug_level >= DEBUG_LEVEL_DATA )
6717 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6718 __FILE__,__LINE__,info->device_name,
6719 *ptmp);
6720 }
6721
6722#ifdef CONFIG_HDLC
6723 if (info->netcount)
6724 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6725 else
6726#endif
6727 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6728 }
6729 }
6730 /* Free the buffers used by this frame. */
6731 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6732
6733 ReturnCode = 1;
6734
6735Cleanup:
6736
6737 if ( info->rx_enabled && info->rx_overflow ) {
6738 /* The receiver needs to be restarted because of
6739 * a receive overflow (buffer or FIFO). If the
6740 * receive buffers are now empty, then restart the receiver.
6741 */
6742
6743 if ( !info->rx_buffer_list[EndIndex].status &&
6744 info->rx_buffer_list[EndIndex].count ) {
6745 spin_lock_irqsave(&info->irq_spinlock,flags);
6746 usc_start_receiver(info);
6747 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6748 }
6749 }
6750
6751 return ReturnCode;
6752
6753} /* end of mgsl_get_rx_frame() */
6754
6755/* mgsl_get_raw_rx_frame()
6756 *
6757 * This function attempts to return a received frame from the
6758 * receive DMA buffers when running in external loop mode. In this mode,
6759 * we will return at most one DMABUFFERSIZE frame to the application.
6760 * The USC receiver is triggering off of DCD going active to start a new
6761 * frame, and DCD going inactive to terminate the frame (similar to
6762 * processing a closing flag character).
6763 *
6764 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6765 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6766 * status field and the RCC field will indicate the length of the
6767 * entire received frame. We take this RCC field and get the modulus
6768 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6769 * last Rx DMA buffer and return that last portion of the frame.
6770 *
6771 * Arguments: info pointer to device extension
6772 * Return Value: 1 if frame returned, otherwise 0
6773 */
6774static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6775{
6776 unsigned int CurrentIndex, NextIndex;
6777 unsigned short status;
6778 DMABUFFERENTRY *pBufEntry;
6779 unsigned int framesize = 0;
6780 int ReturnCode = 0;
6781 unsigned long flags;
6782 struct tty_struct *tty = info->tty;
6783
6784 /*
6785 * current_rx_buffer points to the 1st buffer of the next available
6786 * receive frame. The status field is set by the 16C32 after
6787 * completing a receive frame. If the status field of this buffer
6788 * is zero, either the USC is still filling this buffer or this
6789 * is one of a series of buffers making up a received frame.
6790 *
6791 * If the count field of this buffer is zero, the USC is either
6792 * using this buffer or has used this buffer. Look at the count
6793 * field of the next buffer. If that next buffer's count is
6794 * non-zero, the USC is still actively using the current buffer.
6795 * Otherwise, if the next buffer's count field is zero, the
6796 * current buffer is complete and the USC is using the next
6797 * buffer.
6798 */
6799 CurrentIndex = NextIndex = info->current_rx_buffer;
6800 ++NextIndex;
6801 if ( NextIndex == info->rx_buffer_count )
6802 NextIndex = 0;
6803
6804 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6805 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6806 info->rx_buffer_list[NextIndex].count == 0)) {
6807 /*
6808 * Either the status field of this dma buffer is non-zero
6809 * (indicating the last buffer of a receive frame) or the next
6810 * buffer is marked as in use -- implying this buffer is complete
6811 * and an intermediate buffer for this received frame.
6812 */
6813
6814 status = info->rx_buffer_list[CurrentIndex].status;
6815
6816 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6817 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6818 if ( status & RXSTATUS_SHORT_FRAME )
6819 info->icount.rxshort++;
6820 else if ( status & RXSTATUS_ABORT )
6821 info->icount.rxabort++;
6822 else if ( status & RXSTATUS_OVERRUN )
6823 info->icount.rxover++;
6824 else
6825 info->icount.rxcrc++;
6826 framesize = 0;
6827 } else {
6828 /*
6829 * A receive frame is available, get frame size and status.
6830 *
6831 * The frame size is the starting value of the RCC (which was
6832 * set to 0xffff) minus the ending value of the RCC (decremented
6833 * once for each receive character) minus 2 or 4 for the 16-bit
6834 * or 32-bit CRC.
6835 *
6836 * If the status field is zero, this is an intermediate buffer.
6837 * Its size is 4K.
6838 *
6839 * If the DMA Buffer Entry's Status field is non-zero, the
6840 * receive operation completed normally (ie: DCD dropped). The
6841 * RCC field is valid and holds the received frame size.
6842 * It is possible that the RCC field will be zero on a DMA buffer
6843 * entry with a non-zero status. This can occur if the total
6844 * frame size (number of bytes between the time DCD goes active
6845 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6846 * case the 16C32 has underrun on the RCC count and appears to
6847 * stop updating this counter, so it cannot tell us the actual received
6848 * frame size. If this happens (non-zero status and zero RCC),
6849 * simply return the entire RxDMA Buffer
6850 */
6851 if ( status ) {
6852 /*
6853 * In the event that the final RxDMA Buffer is
6854 * terminated with a non-zero status and the RCC
6855 * field is zero, we interpret this as the RCC
6856 * having underflowed (received frame > 65535 bytes).
6857 *
6858 * Signal the event to the user by passing back
6859 * a status of RxStatus_CrcError returning the full
6860 * buffer and let the app figure out what data is
6861 * actually valid
6862 */
6863 if ( info->rx_buffer_list[CurrentIndex].rcc )
6864 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6865 else
6866 framesize = DMABUFFERSIZE;
6867 }
6868 else
6869 framesize = DMABUFFERSIZE;
6870 }
6871
6872 if ( framesize > DMABUFFERSIZE ) {
6873 /*
6874 * if running in raw sync mode, ISR handler for
6875 * End Of Buffer events terminates all buffers at 4K.
6876 * If this frame size is said to be >4K, get the
6877 * actual number of bytes of the frame in this buffer.
6878 */
6879 framesize = framesize % DMABUFFERSIZE;
6880 }
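		/* Example (illustrative): a 10000 byte raw frame with 4K (4096
		 * byte) DMA buffers fills two whole buffers, so the final buffer
		 * holds 10000 % 4096 = 1808 bytes and that is what gets returned.
		 */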
6881
6882
6883 if ( debug_level >= DEBUG_LEVEL_BH )
6884 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6885 __FILE__,__LINE__,info->device_name,status,framesize);
6886
6887 if ( debug_level >= DEBUG_LEVEL_DATA )
6888 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6889 min_t(int, framesize, DMABUFFERSIZE),0);
6890
6891 if (framesize) {
6892 /* copy dma buffer(s) to contiguous intermediate buffer */
6893 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6894
6895 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6896 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6897 info->icount.rxok++;
6898
6899 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6900 }
6901
6902 /* Free the buffers used by this frame. */
6903 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6904
6905 ReturnCode = 1;
6906 }
6907
6908
6909 if ( info->rx_enabled && info->rx_overflow ) {
6910 /* The receiver needs to be restarted because of
6911 * a receive overflow (buffer or FIFO). If the
6912 * receive buffers are now empty, then restart the receiver.
6913 */
6914
6915 if ( !info->rx_buffer_list[CurrentIndex].status &&
6916 info->rx_buffer_list[CurrentIndex].count ) {
6917 spin_lock_irqsave(&info->irq_spinlock,flags);
6918 usc_start_receiver(info);
6919 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6920 }
6921 }
6922
6923 return ReturnCode;
6924
6925} /* end of mgsl_get_raw_rx_frame() */
6926
6927/* mgsl_load_tx_dma_buffer()
6928 *
6929 * Load the transmit DMA buffer with the specified data.
6930 *
6931 * Arguments:
6932 *
6933 * info pointer to device extension
6934 * Buffer pointer to buffer containing frame to load
6935 * BufferSize size in bytes of frame in Buffer
6936 *
6937 * Return Value: None
6938 */
6939static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6940 const char *Buffer, unsigned int BufferSize)
6941{
6942 unsigned short Copycount;
6943 unsigned int i = 0;
6944 DMABUFFERENTRY *pBufEntry;
6945
6946 if ( debug_level >= DEBUG_LEVEL_DATA )
6947 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6948
6949 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6950 /* set CMR:13 to start transmit when
6951 * next GoAhead (abort) is received
6952 */
6953 info->cmr_value |= BIT13;
6954 }
6955
6956 /* begin loading the frame in the next available tx dma
6957 * buffer, remember it's starting location for setting
6958 * up tx dma operation
6959 */
6960 i = info->current_tx_buffer;
6961 info->start_tx_dma_buffer = i;
6962
6963 /* Setup the status and RCC (Frame Size) fields of the 1st */
6964 /* buffer entry in the transmit DMA buffer list. */
6965
6966 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6967 info->tx_buffer_list[i].rcc = BufferSize;
6968 info->tx_buffer_list[i].count = BufferSize;
6969
6970 /* Copy frame data from 1st source buffer to the DMA buffers. */
6971 /* The frame data may span multiple DMA buffers. */
6972
6973 while( BufferSize ){
6974 /* Get a pointer to next DMA buffer entry. */
6975 pBufEntry = &info->tx_buffer_list[i++];
6976
6977 if ( i == info->tx_buffer_count )
6978 i=0;
6979
6980 /* Calculate the number of bytes that can be copied from */
6981 /* the source buffer to this DMA buffer. */
6982 if ( BufferSize > DMABUFFERSIZE )
6983 Copycount = DMABUFFERSIZE;
6984 else
6985 Copycount = BufferSize;
6986
6987 /* Actually copy data from source buffer to DMA buffer. */
6988 /* Also set the data count for this individual DMA buffer. */
6989 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6990 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6991 else
6992 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6993
6994 pBufEntry->count = Copycount;
6995
6996 /* Advance source pointer and reduce remaining data count. */
6997 Buffer += Copycount;
6998 BufferSize -= Copycount;
6999
7000 ++info->tx_dma_buffers_used;
7001 }
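	/* Example (illustrative): with 4096 byte DMA buffers a 10000 byte frame
	 * is split across three consecutive entries with counts 4096, 4096 and
	 * 1808, while the rcc field of the first entry keeps the full 10000.
	 */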
7002
7003 /* remember next available tx dma buffer */
7004 info->current_tx_buffer = i;
7005
7006} /* end of mgsl_load_tx_dma_buffer() */
7007
7008/*
7009 * mgsl_register_test()
7010 *
7011 * Performs a register test of the 16C32.
7012 *
7013 * Arguments: info pointer to device instance data
7014 * Return Value: TRUE if test passed, otherwise FALSE
7015 */
7016static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7017{
7018 static unsigned short BitPatterns[] =
7019 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7020	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
7021	unsigned int i;
7022 BOOLEAN rc = TRUE;
7023 unsigned long flags;
7024
7025 spin_lock_irqsave(&info->irq_spinlock,flags);
7026 usc_reset(info);
7027
7028 /* Verify the reset state of some registers. */
7029
7030 if ( (usc_InReg( info, SICR ) != 0) ||
7031 (usc_InReg( info, IVR ) != 0) ||
7032 (usc_InDmaReg( info, DIVR ) != 0) ){
7033 rc = FALSE;
7034 }
7035
7036 if ( rc == TRUE ){
7037 /* Write bit patterns to various registers but do it out of */
7038 /* sync, then read back and verify values. */
7039
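		/* The pattern index is staggered across the registers so that no two
		 * registers written in one pass hold the same value (presumably to
		 * catch registers that alias one another).
		 */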
7040 for ( i = 0 ; i < Patterncount ; i++ ) {
7041 usc_OutReg( info, TC0R, BitPatterns[i] );
7042 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7043 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7044 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7045 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7046 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7047
7048 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7049 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7050 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7051 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7052 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7053 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7054 rc = FALSE;
7055 break;
7056 }
7057 }
7058 }
7059
7060 usc_reset(info);
7061 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7062
7063 return rc;
7064
7065} /* end of mgsl_register_test() */
7066
7067/* mgsl_irq_test() Perform interrupt test of the 16C32.
7068 *
7069 * Arguments: info pointer to device instance data
7070 * Return Value: TRUE if test passed, otherwise FALSE
7071 */
7072static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7073{
7074 unsigned long EndTime;
7075 unsigned long flags;
7076
7077 spin_lock_irqsave(&info->irq_spinlock,flags);
7078 usc_reset(info);
7079
7080 /*
7081 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7082 * The ISR sets irq_occurred to 1.
7083 */
7084
7085 info->irq_occurred = FALSE;
7086
7087 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7088 /* Enable INTEN (Port 6, Bit12) */
7089 /* This connects the IRQ request signal to the ISA bus */
7090 /* on the ISA adapter. This has no effect for the PCI adapter */
7091 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7092
7093 usc_EnableMasterIrqBit(info);
7094 usc_EnableInterrupts(info, IO_PIN);
7095 usc_ClearIrqPendingBits(info, IO_PIN);
7096
7097 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7098 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7099
7100 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7101
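	/* Poll for up to ~1 second (100 iterations x 10ms) for the ISR to set
	 * irq_occurred.
	 */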
7102 EndTime=100;
7103 while( EndTime-- && !info->irq_occurred ) {
7104 msleep_interruptible(10);
7105 }
7106
7107 spin_lock_irqsave(&info->irq_spinlock,flags);
7108 usc_reset(info);
7109 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7110
7111 if ( !info->irq_occurred )
7112 return FALSE;
7113 else
7114 return TRUE;
7115
7116} /* end of mgsl_irq_test() */
7117
7118/* mgsl_dma_test()
7119 *
7120 * Perform a DMA test of the 16C32. A small frame is
7121 * transmitted via DMA from a transmit buffer to a receive buffer
7122 * using single buffer DMA mode.
7123 *
7124 * Arguments: info pointer to device instance data
7125 * Return Value: TRUE if test passed, otherwise FALSE
7126 */
7127static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7128{
7129 unsigned short FifoLevel;
7130 unsigned long phys_addr;
7131 unsigned int FrameSize;
7132 unsigned int i;
7133 char *TmpPtr;
7134 BOOLEAN rc = TRUE;
7135 unsigned short status=0;
7136 unsigned long EndTime;
7137 unsigned long flags;
7138 MGSL_PARAMS tmp_params;
7139
7140 /* save current port options */
7141 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7142 /* load default port options */
7143 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7144
7145#define TESTFRAMESIZE 40
7146
7147 spin_lock_irqsave(&info->irq_spinlock,flags);
7148
7149 /* setup 16C32 for SDLC DMA transfer mode */
7150
7151 usc_reset(info);
7152 usc_set_sdlc_mode(info);
7153 usc_enable_loopback(info,1);
7154
7155 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7156 * field of the buffer entry after fetching buffer address. This
7157 * way we can detect a DMA failure for a DMA read (which should be
7158	 * non-destructive to system memory) before we try to write to
7159 * memory (where a failure could corrupt system memory).
7160 */
7161
7162 /* Receive DMA mode Register (RDMR)
7163 *
7164 * <15..14> 11 DMA mode = Linked List Buffer mode
7165 * <13> 1 RSBinA/L = store Rx status Block in List entry
7166 * <12> 0 1 = Clear count of List Entry after fetching
7167 * <11..10> 00 Address mode = Increment
7168 * <9> 1 Terminate Buffer on RxBound
7169 * <8> 0 Bus Width = 16bits
7170 * <7..0> ? status Bits (write as 0s)
7171 *
7172 * 1110 0010 0000 0000 = 0xe200
7173 */
7174
7175 usc_OutDmaReg( info, RDMR, 0xe200 );
7176
7177 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7178
7179
7180 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7181
7182 FrameSize = TESTFRAMESIZE;
7183
7184 /* setup 1st transmit buffer entry: */
7185 /* with frame size and transmit control word */
7186
7187 info->tx_buffer_list[0].count = FrameSize;
7188 info->tx_buffer_list[0].rcc = FrameSize;
7189 info->tx_buffer_list[0].status = 0x4000;
7190
7191 /* build a transmit frame in 1st transmit DMA buffer */
7192
7193 TmpPtr = info->tx_buffer_list[0].virt_addr;
7194 for (i = 0; i < FrameSize; i++ )
7195 *TmpPtr++ = i;
7196
7197 /* setup 1st receive buffer entry: */
7198 /* clear status, set max receive buffer size */
7199
7200 info->rx_buffer_list[0].status = 0;
7201 info->rx_buffer_list[0].count = FrameSize + 4;
7202
7203 /* zero out the 1st receive buffer */
7204
7205 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7206
7207 /* Set count field of next buffer entries to prevent */
7208 /* 16C32 from using buffers after the 1st one. */
7209
7210 info->tx_buffer_list[1].count = 0;
7211 info->rx_buffer_list[1].count = 0;
7212
7213
7214 /***************************/
7215 /* Program 16C32 receiver. */
7216 /***************************/
7217
7218 spin_lock_irqsave(&info->irq_spinlock,flags);
7219
7220 /* setup DMA transfers */
7221 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7222
7223 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7224 phys_addr = info->rx_buffer_list[0].phys_entry;
7225 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7226 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7227
7228 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7229 usc_InDmaReg( info, RDMR );
7230 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7231
7232 /* Enable Receiver (RMR <1..0> = 10) */
7233 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7234
7235 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7236
7237
7238 /*************************************************************/
7239 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7240 /*************************************************************/
7241
7242 /* Wait 100ms for interrupt. */
7243 EndTime = jiffies + msecs_to_jiffies(100);
7244
7245 for(;;) {
7246 if (time_after(jiffies, EndTime)) {
7247 rc = FALSE;
7248 break;
7249 }
7250
7251 spin_lock_irqsave(&info->irq_spinlock,flags);
7252 status = usc_InDmaReg( info, RDMR );
7253 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7254
7255 if ( !(status & BIT4) && (status & BIT5) ) {
7256 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7257 /* BUSY (BIT 5) is active (channel still active). */
7258 /* This means the buffer entry read has completed. */
7259 break;
7260 }
7261 }
7262
7263
7264 /******************************/
7265 /* Program 16C32 transmitter. */
7266 /******************************/
7267
7268 spin_lock_irqsave(&info->irq_spinlock,flags);
7269
7270 /* Program the Transmit Character Length Register (TCLR) */
7271 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7272
7273 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7274 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7275
7276 /* Program the address of the 1st DMA Buffer Entry in linked list */
7277
7278 phys_addr = info->tx_buffer_list[0].phys_entry;
7279 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7280 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7281
7282 /* unlatch Tx status bits, and start transmit channel. */
7283
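	/* The 0x0f00 mask preserves TCSR<11..8>; writing 1s to the low byte (0xfa)
	 * presumably clears the latched Tx status bits, per the comment above.
	 */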
7284 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7285 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7286
7287 /* wait for DMA controller to fill transmit FIFO */
7288
7289 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7290
7291 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7292
7293
7294 /**********************************/
7295 /* WAIT FOR TRANSMIT FIFO TO FILL */
7296 /**********************************/
7297
7298 /* Wait 100ms */
7299 EndTime = jiffies + msecs_to_jiffies(100);
7300
7301 for(;;) {
7302 if (time_after(jiffies, EndTime)) {
7303 rc = FALSE;
7304 break;
7305 }
7306
7307 spin_lock_irqsave(&info->irq_spinlock,flags);
7308 FifoLevel = usc_InReg(info, TICR) >> 8;
7309 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7310
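	/* FifoLevel (TICR upper byte after TCmd_SelectTicrTxFifostatus) appears to
	 * report the number of empty entries in the 32-byte transmit FIFO: wait
	 * until DMA has filled at least half of it, or the whole frame when the
	 * frame is shorter than the FIFO.
	 */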
7311 if ( FifoLevel < 16 )
7312 break;
7313 else
7314 if ( FrameSize < 32 ) {
7315 /* This frame is smaller than the entire transmit FIFO */
7316 /* so wait for the entire frame to be loaded. */
7317 if ( FifoLevel <= (32 - FrameSize) )
7318 break;
7319 }
7320 }
7321
7322
7323 if ( rc == TRUE )
7324 {
7325 /* Enable 16C32 transmitter. */
7326
7327 spin_lock_irqsave(&info->irq_spinlock,flags);
7328
7329 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7330 usc_TCmd( info, TCmd_SendFrame );
7331 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7332
7333 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7334
7335
7336 /******************************/
7337 /* WAIT FOR TRANSMIT COMPLETE */
7338 /******************************/
7339
7340 /* Wait 100ms */
7341 EndTime = jiffies + msecs_to_jiffies(100);
7342
7343 /* While timer not expired wait for transmit complete */
7344
7345 spin_lock_irqsave(&info->irq_spinlock,flags);
7346 status = usc_InReg( info, TCSR );
7347 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7348
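		/* Poll TCSR until one of bits 6,5,4,2,1 is set or the timer expires;
		 * bits 5 and 1 are then checked as transmit errors below.
		 */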
7349 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7350 if (time_after(jiffies, EndTime)) {
7351 rc = FALSE;
7352 break;
7353 }
7354
7355 spin_lock_irqsave(&info->irq_spinlock,flags);
7356 status = usc_InReg( info, TCSR );
7357 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7358 }
7359 }
7360
7361
7362 if ( rc == TRUE ){
7363 /* CHECK FOR TRANSMIT ERRORS */
7364 if ( status & (BIT5 + BIT1) )
7365 rc = FALSE;
7366 }
7367
7368 if ( rc == TRUE ) {
7369 /* WAIT FOR RECEIVE COMPLETE */
7370
7371 /* Wait 100ms */
7372 EndTime = jiffies + msecs_to_jiffies(100);
7373
7374 /* Wait for 16C32 to write receive status to buffer entry. */
7375 status=info->rx_buffer_list[0].status;
7376 while ( status == 0 ) {
7377 if (time_after(jiffies, EndTime)) {
7378 rc = FALSE;
7379 break;
7380 }
7381 status=info->rx_buffer_list[0].status;
7382 }
7383 }
7384
7385
7386 if ( rc == TRUE ) {
7387 /* CHECK FOR RECEIVE ERRORS */
7388 status = info->rx_buffer_list[0].status;
7389
7390 if ( status & (BIT8 + BIT3 + BIT1) ) {
7391 /* receive error has occurred */
7392 rc = FALSE;
7393 } else {
7394 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7395 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7396 rc = FALSE;
7397 }
7398 }
7399 }
7400
7401 spin_lock_irqsave(&info->irq_spinlock,flags);
7402 usc_reset( info );
7403 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7404
7405 /* restore current port options */
7406 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7407
7408 return rc;
7409
7410} /* end of mgsl_dma_test() */
7411
7412/* mgsl_adapter_test()
7413 *
7414 * Perform the register, IRQ, and DMA tests for the 16C32.
7415 *
7416 * Arguments: info pointer to device instance data
7417 * Return Value: 0 if success, otherwise -ENODEV
7418 */
7419static int mgsl_adapter_test( struct mgsl_struct *info )
7420{
7421 if ( debug_level >= DEBUG_LEVEL_INFO )
7422 printk( "%s(%d):Testing device %s\n",
7423 __FILE__,__LINE__,info->device_name );
7424
7425 if ( !mgsl_register_test( info ) ) {
7426 info->init_error = DiagStatus_AddressFailure;
7427 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7428 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7429 return -ENODEV;
7430 }
7431
7432 if ( !mgsl_irq_test( info ) ) {
7433 info->init_error = DiagStatus_IrqFailure;
7434 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7435 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7436 return -ENODEV;
7437 }
7438
7439 if ( !mgsl_dma_test( info ) ) {
7440 info->init_error = DiagStatus_DmaFailure;
7441 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7442 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7443 return -ENODEV;
7444 }
7445
7446 if ( debug_level >= DEBUG_LEVEL_INFO )
7447 printk( "%s(%d):device %s passed diagnostics\n",
7448 __FILE__,__LINE__,info->device_name );
7449
7450 return 0;
7451
7452} /* end of mgsl_adapter_test() */
7453
7454/* mgsl_memory_test()
7455 *
7456 * Test the shared memory on a PCI adapter.
7457 *
7458 * Arguments: info pointer to device instance data
7459 * Return Value: TRUE if test passed, otherwise FALSE
7460 */
7461static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7462{
7463	static unsigned long BitPatterns[] =
7464 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7465 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7466	unsigned long i;
7467 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7468 unsigned long * TestAddr;
7469
7470 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7471 return TRUE;
7472
7473 TestAddr = (unsigned long *)info->memory_base;
7474
7475 /* Test data lines with test pattern at one location. */
7476
7477 for ( i = 0 ; i < Patterncount ; i++ ) {
7478 *TestAddr = BitPatterns[i];
7479 if ( *TestAddr != BitPatterns[i] )
7480 return FALSE;
7481 }
7482
7483 /* Test address lines with incrementing pattern over */
7484 /* entire address range. */
7485
7486 for ( i = 0 ; i < TestLimit ; i++ ) {
7487 *TestAddr = i * 4;
7488 TestAddr++;
7489 }
7490
7491 TestAddr = (unsigned long *)info->memory_base;
7492
7493 for ( i = 0 ; i < TestLimit ; i++ ) {
7494 if ( *TestAddr != i * 4 )
7495 return FALSE;
7496 TestAddr++;
7497 }
7498
7499 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7500
7501 return TRUE;
7502
7503} /* End Of mgsl_memory_test() */
7504
7505
7506/* mgsl_load_pci_memory()
7507 *
7508 * Load a large block of data into the PCI shared memory.
7509 * Use this instead of memcpy() or memmove() to move data
7510 * into the PCI shared memory.
7511 *
7512 * Notes:
7513 *
7514 * This function prevents the PCI9050 interface chip from hogging
7515 * the adapter local bus, which can starve the 16C32 by preventing
7516 * 16C32 bus master cycles.
7517 *
7518 * The PCI9050 documentation says that the 9050 will always release
7519 * control of the local bus after completing the current read
7520 * or write operation.
7521 *
7522 * It appears that as long as the PCI9050 write FIFO is full, the
7523 * PCI9050 treats all of the writes as a single burst transaction
7524 * and will not release the bus. This causes DMA latency problems
7525 * at high speeds when copying large data blocks to the shared
7526 * memory.
7527 *
7528 * This function, in effect, breaks a large shared memory write
7529 * into multiple transactions by interleaving a shared memory read
7530 * which will flush the write FIFO and 'complete' the write
7531 * transaction. This allows any pending DMA request to gain control
7532 * of the local bus in a timely fashion.
7533 *
7534 * Arguments:
7535 *
7536 * TargetPtr pointer to target address in PCI shared memory
7537 * SourcePtr pointer to source buffer for data
7538 * count count in bytes of data to copy
7539 *
7540 * Return Value: None
7541 */
7542static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7543 unsigned short count )
7544{
7545 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7546#define PCI_LOAD_INTERVAL 64
7547
7548 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7549 unsigned short Index;
7550 unsigned long Dummy;
7551
7552 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7553 {
7554 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
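		/* Reading back from shared memory flushes the PCI9050 write FIFO,
		 * ending the burst so a pending DMA request can win the local bus
		 * (see the notes above).
		 */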
7555 Dummy = *((volatile unsigned long *)TargetPtr);
7556 TargetPtr += PCI_LOAD_INTERVAL;
7557 SourcePtr += PCI_LOAD_INTERVAL;
7558 }
7559
7560 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7561
7562} /* End Of mgsl_load_pci_memory() */
7563
7564static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7565{
7566 int i;
7567 int linecount;
7568 if (xmit)
7569 printk("%s tx data:\n",info->device_name);
7570 else
7571 printk("%s rx data:\n",info->device_name);
7572
7573 while(count) {
7574 if (count > 16)
7575 linecount = 16;
7576 else
7577 linecount = count;
7578
7579 for(i=0;i<linecount;i++)
7580 printk("%02X ",(unsigned char)data[i]);
7581 for(;i<17;i++)
7582 printk(" ");
7583 for(i=0;i<linecount;i++) {
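			/* octal 040-0176 is the printable ASCII range (0x20-0x7e) */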
7584 if (data[i]>=040 && data[i]<=0176)
7585 printk("%c",data[i]);
7586 else
7587 printk(".");
7588 }
7589 printk("\n");
7590
7591 data += linecount;
7592 count -= linecount;
7593 }
7594} /* end of mgsl_trace_block() */
7595
7596/* mgsl_tx_timeout()
7597 *
7598 * called when HDLC frame times out
7599 * update stats and do tx completion processing
7600 *
7601 * Arguments: context pointer to device instance data
7602 * Return Value: None
7603 */
7604static void mgsl_tx_timeout(unsigned long context)
7605{
7606 struct mgsl_struct *info = (struct mgsl_struct*)context;
7607 unsigned long flags;
7608
7609 if ( debug_level >= DEBUG_LEVEL_INFO )
7610 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7611 __FILE__,__LINE__,info->device_name);
7612 if(info->tx_active &&
7613 (info->params.mode == MGSL_MODE_HDLC ||
7614 info->params.mode == MGSL_MODE_RAW) ) {
7615 info->icount.txtimeout++;
7616 }
7617 spin_lock_irqsave(&info->irq_spinlock,flags);
7618 info->tx_active = 0;
7619 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7620
7621 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7622 usc_loopmode_cancel_transmit( info );
7623
7624 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7625
7626#ifdef CONFIG_HDLC
7627 if (info->netcount)
7628 hdlcdev_tx_done(info);
7629 else
7630#endif
7631 mgsl_bh_transmit(info);
7632
7633} /* end of mgsl_tx_timeout() */
7634
7635/* signal that there are no more frames to send, so that
7636 * line is 'released' by echoing RxD to TxD when current
7637 * transmission is complete (or immediately if no tx in progress).
7638 */
7639static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7640{
7641 unsigned long flags;
7642
7643 spin_lock_irqsave(&info->irq_spinlock,flags);
7644 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7645 if (info->tx_active)
7646 info->loopmode_send_done_requested = TRUE;
7647 else
7648 usc_loopmode_send_done(info);
7649 }
7650 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7651
7652 return 0;
7653}
7654
7655/* release the line by echoing RxD to TxD
7656 * upon completion of a transmit frame
7657 */
7658static void usc_loopmode_send_done( struct mgsl_struct * info )
7659{
7660 info->loopmode_send_done_requested = FALSE;
7661 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7662 info->cmr_value &= ~BIT13;
7663 usc_OutReg(info, CMR, info->cmr_value);
7664}
7665
7666/* abort a transmit in progress while in HDLC LoopMode
7667 */
7668static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7669{
7670 /* reset tx dma channel and purge TxFifo */
7671 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7672 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7673 usc_loopmode_send_done( info );
7674}
7675
7676/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7677 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7678 * we must clear CMR:13 to begin repeating TxData to RxData
7679 */
7680static void usc_loopmode_insert_request( struct mgsl_struct * info )
7681{
7682 info->loopmode_insert_requested = TRUE;
7683
7684 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7685 * begin repeating TxData on RxData (complete insertion)
7686 */
7687 usc_OutReg( info, RICR,
7688 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7689
7690 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7691 info->cmr_value |= BIT13;
7692 usc_OutReg(info, CMR, info->cmr_value);
7693}
7694
7695/* return 1 if station is inserted into the loop, otherwise 0
7696 */
7697static int usc_loopmode_active( struct mgsl_struct * info)
7698{
7699 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7700}
7701
7702#ifdef CONFIG_HDLC
7703
7704/**
7705 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7706 * set encoding and frame check sequence (FCS) options
7707 *
7708 * dev pointer to network device structure
7709 * encoding serial encoding setting
7710 * parity FCS setting
7711 *
7712 * returns 0 if success, otherwise error code
7713 */
7714static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7715 unsigned short parity)
7716{
7717 struct mgsl_struct *info = dev_to_port(dev);
7718 unsigned char new_encoding;
7719 unsigned short new_crctype;
7720
7721 /* return error if TTY interface open */
7722 if (info->count)
7723 return -EBUSY;
7724
7725 switch (encoding)
7726 {
7727 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7728 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7729 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7730 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7731 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7732 default: return -EINVAL;
7733 }
7734
7735 switch (parity)
7736 {
7737 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7738 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7739 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7740 default: return -EINVAL;
7741 }
7742
7743 info->params.encoding = new_encoding;
7744	info->params.crc_type = new_crctype;
7745
7746 /* if network interface up, reprogram hardware */
7747 if (info->netcount)
7748 mgsl_program_hw(info);
7749
7750 return 0;
7751}
7752
7753/**
7754 * called by generic HDLC layer to send frame
7755 *
7756 * skb socket buffer containing HDLC frame
7757 * dev pointer to network device structure
7758 *
7759 * returns 0 if success, otherwise error code
7760 */
7761static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7762{
7763 struct mgsl_struct *info = dev_to_port(dev);
7764 struct net_device_stats *stats = hdlc_stats(dev);
7765 unsigned long flags;
7766
7767 if (debug_level >= DEBUG_LEVEL_INFO)
7768 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7769
7770 /* stop sending until this frame completes */
7771 netif_stop_queue(dev);
7772
7773 /* copy data to device buffers */
7774 info->xmit_cnt = skb->len;
7775 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7776
7777 /* update network statistics */
7778 stats->tx_packets++;
7779 stats->tx_bytes += skb->len;
7780
7781 /* done with socket buffer, so free it */
7782 dev_kfree_skb(skb);
7783
7784 /* save start time for transmit timeout detection */
7785 dev->trans_start = jiffies;
7786
7787 /* start hardware transmitter if necessary */
7788 spin_lock_irqsave(&info->irq_spinlock,flags);
7789 if (!info->tx_active)
7790 usc_start_transmitter(info);
7791 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7792
7793 return 0;
7794}
7795
7796/**
7797 * called by network layer when interface enabled
7798 * claim resources and initialize hardware
7799 *
7800 * dev pointer to network device structure
7801 *
7802 * returns 0 if success, otherwise error code
7803 */
7804static int hdlcdev_open(struct net_device *dev)
7805{
7806 struct mgsl_struct *info = dev_to_port(dev);
7807 int rc;
7808 unsigned long flags;
7809
7810 if (debug_level >= DEBUG_LEVEL_INFO)
7811 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7812
7813 /* generic HDLC layer open processing */
7814 if ((rc = hdlc_open(dev)))
7815 return rc;
7816
7817 /* arbitrate between network and tty opens */
7818 spin_lock_irqsave(&info->netlock, flags);
7819 if (info->count != 0 || info->netcount != 0) {
7820 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7821 spin_unlock_irqrestore(&info->netlock, flags);
7822 return -EBUSY;
7823 }
7824 info->netcount=1;
7825 spin_unlock_irqrestore(&info->netlock, flags);
7826
7827 /* claim resources and init adapter */
7828 if ((rc = startup(info)) != 0) {
7829 spin_lock_irqsave(&info->netlock, flags);
7830 info->netcount=0;
7831 spin_unlock_irqrestore(&info->netlock, flags);
7832 return rc;
7833 }
7834
7835 /* assert DTR and RTS, apply hardware settings */
7836 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7837 mgsl_program_hw(info);
7838
7839 /* enable network layer transmit */
7840 dev->trans_start = jiffies;
7841 netif_start_queue(dev);
7842
7843 /* inform generic HDLC layer of current DCD status */
7844 spin_lock_irqsave(&info->irq_spinlock, flags);
7845 usc_get_serial_signals(info);
7846 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7847 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7848
7849 return 0;
7850}
7851
7852/**
7853 * called by network layer when interface is disabled
7854 * shutdown hardware and release resources
7855 *
7856 * dev pointer to network device structure
7857 *
7858 * returns 0 if success, otherwise error code
7859 */
7860static int hdlcdev_close(struct net_device *dev)
7861{
7862 struct mgsl_struct *info = dev_to_port(dev);
7863 unsigned long flags;
7864
7865 if (debug_level >= DEBUG_LEVEL_INFO)
7866 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7867
7868 netif_stop_queue(dev);
7869
7870 /* shutdown adapter and release resources */
7871 shutdown(info);
7872
7873 hdlc_close(dev);
7874
7875 spin_lock_irqsave(&info->netlock, flags);
7876 info->netcount=0;
7877 spin_unlock_irqrestore(&info->netlock, flags);
7878
7879 return 0;
7880}
7881
7882/**
7883 * called by network layer to process IOCTL call to network device
7884 *
7885 * dev pointer to network device structure
7886 * ifr pointer to network interface request structure
7887 * cmd IOCTL command code
7888 *
7889 * returns 0 if success, otherwise error code
7890 */
7891static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7892{
7893 const size_t size = sizeof(sync_serial_settings);
7894 sync_serial_settings new_line;
7895 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7896 struct mgsl_struct *info = dev_to_port(dev);
7897 unsigned int flags;
7898
7899 if (debug_level >= DEBUG_LEVEL_INFO)
7900 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7901
7902 /* return error if TTY interface open */
7903 if (info->count)
7904 return -EBUSY;
7905
7906 if (cmd != SIOCWANDEV)
7907 return hdlc_ioctl(dev, ifr, cmd);
7908
7909 switch(ifr->ifr_settings.type) {
7910 case IF_GET_IFACE: /* return current sync_serial_settings */
7911
7912 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7913 if (ifr->ifr_settings.size < size) {
7914 ifr->ifr_settings.size = size; /* data size wanted */
7915 return -ENOBUFS;
7916 }
7917
7918 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7919 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7920 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7921 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7922
7923 switch (flags){
7924 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7925 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7926 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7927 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7928 default: new_line.clock_type = CLOCK_DEFAULT;
7929 }
7930
7931 new_line.clock_rate = info->params.clock_speed;
7932 new_line.loopback = info->params.loopback ? 1:0;
7933
7934 if (copy_to_user(line, &new_line, size))
7935 return -EFAULT;
7936 return 0;
7937
7938 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7939
7940 if(!capable(CAP_NET_ADMIN))
7941 return -EPERM;
7942 if (copy_from_user(&new_line, line, size))
7943 return -EFAULT;
7944
7945 switch (new_line.clock_type)
7946 {
7947 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7948 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7949 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7950 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7951 case CLOCK_DEFAULT: flags = info->params.flags &
7952 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7953 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7954 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7955 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7956 default: return -EINVAL;
7957 }
7958
7959 if (new_line.loopback != 0 && new_line.loopback != 1)
7960 return -EINVAL;
7961
7962 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7963 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7964 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7965 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7966 info->params.flags |= flags;
7967
7968 info->params.loopback = new_line.loopback;
7969
7970 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7971 info->params.clock_speed = new_line.clock_rate;
7972 else
7973 info->params.clock_speed = 0;
7974
7975 /* if network interface up, reprogram hardware */
7976 if (info->netcount)
7977 mgsl_program_hw(info);
7978 return 0;
7979
7980 default:
7981 return hdlc_ioctl(dev, ifr, cmd);
7982 }
7983}
7984
7985/**
7986 * called by network layer when transmit timeout is detected
7987 *
7988 * dev pointer to network device structure
7989 */
7990static void hdlcdev_tx_timeout(struct net_device *dev)
7991{
7992 struct mgsl_struct *info = dev_to_port(dev);
7993 struct net_device_stats *stats = hdlc_stats(dev);
7994 unsigned long flags;
7995
7996 if (debug_level >= DEBUG_LEVEL_INFO)
7997 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7998
7999 stats->tx_errors++;
8000 stats->tx_aborted_errors++;
8001
8002 spin_lock_irqsave(&info->irq_spinlock,flags);
8003 usc_stop_transmitter(info);
8004 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8005
8006 netif_wake_queue(dev);
8007}
8008
8009/**
8010 * called by device driver when transmit completes
8011 * reenable network layer transmit if stopped
8012 *
8013 * info pointer to device instance information
8014 */
8015static void hdlcdev_tx_done(struct mgsl_struct *info)
8016{
8017 if (netif_queue_stopped(info->netdev))
8018 netif_wake_queue(info->netdev);
8019}
8020
8021/**
8022 * called by device driver when frame received
8023 * pass frame to network layer
8024 *
8025 * info pointer to device instance information
8026 * buf   pointer to buffer containing frame data
8027 * size count of data bytes in buf
8028 */
8029static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8030{
8031 struct sk_buff *skb = dev_alloc_skb(size);
8032 struct net_device *dev = info->netdev;
8033 struct net_device_stats *stats = hdlc_stats(dev);
8034
8035 if (debug_level >= DEBUG_LEVEL_INFO)
8036 printk("hdlcdev_rx(%s)\n",dev->name);
8037
8038 if (skb == NULL) {
8039 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8040 stats->rx_dropped++;
8041 return;
8042 }
8043
8044 memcpy(skb_put(skb, size),buf,size);
8045
8046 skb->protocol = hdlc_type_trans(skb, info->netdev);
8047
8048 stats->rx_packets++;
8049 stats->rx_bytes += size;
8050
8051 netif_rx(skb);
8052
8053 info->netdev->last_rx = jiffies;
8054}
8055
8056/**
8057 * called by device driver when adding device instance
8058 * do generic HDLC initialization
8059 *
8060 * info pointer to device instance information
8061 *
8062 * returns 0 if success, otherwise error code
8063 */
8064static int hdlcdev_init(struct mgsl_struct *info)
8065{
8066 int rc;
8067 struct net_device *dev;
8068 hdlc_device *hdlc;
8069
8070 /* allocate and initialize network and HDLC layer objects */
8071
8072 if (!(dev = alloc_hdlcdev(info))) {
8073 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8074 return -ENOMEM;
8075 }
8076
8077 /* for network layer reporting purposes only */
8078 dev->base_addr = info->io_base;
8079 dev->irq = info->irq_level;
8080 dev->dma = info->dma_level;
8081
8082 /* network layer callbacks and settings */
8083 dev->do_ioctl = hdlcdev_ioctl;
8084 dev->open = hdlcdev_open;
8085 dev->stop = hdlcdev_close;
8086 dev->tx_timeout = hdlcdev_tx_timeout;
8087 dev->watchdog_timeo = 10*HZ;
8088 dev->tx_queue_len = 50;
8089
8090 /* generic HDLC layer callbacks and settings */
8091 hdlc = dev_to_hdlc(dev);
8092 hdlc->attach = hdlcdev_attach;
8093 hdlc->xmit = hdlcdev_xmit;
8094
8095 /* register objects with HDLC layer */
8096 if ((rc = register_hdlc_device(dev))) {
8097 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8098 free_netdev(dev);
8099 return rc;
8100 }
8101
8102 info->netdev = dev;
8103 return 0;
8104}
8105
8106/**
8107 * called by device driver when removing device instance
8108 * do generic HDLC cleanup
8109 *
8110 * info pointer to device instance information
8111 */
8112static void hdlcdev_exit(struct mgsl_struct *info)
8113{
8114 unregister_hdlc_device(info->netdev);
8115 free_netdev(info->netdev);
8116 info->netdev = NULL;
8117}
8118
8119#endif /* CONFIG_HDLC */
8120
8121
8122static int __devinit synclink_init_one (struct pci_dev *dev,
8123 const struct pci_device_id *ent)
8124{
8125 struct mgsl_struct *info;
8126
8127 if (pci_enable_device(dev)) {
8128 printk("error enabling pci device %p\n", dev);
8129 return -EIO;
8130 }
8131
8132 if (!(info = mgsl_allocate_device())) {
8133 printk("can't allocate device instance data.\n");
8134 return -EIO;
8135 }
8136
8137 /* Copy user configuration info to device instance data */
8138
8139 info->io_base = pci_resource_start(dev, 2);
8140 info->irq_level = dev->irq;
8141 info->phys_memory_base = pci_resource_start(dev, 3);
8142
8143	/* Because ioremap only works on page boundaries we must map
8144 * a larger area than is actually implemented for the LCR
8145 * memory range. We map a full page starting at the page boundary.
8146 */
8147 info->phys_lcr_base = pci_resource_start(dev, 0);
8148 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8149 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8150
8151 info->bus_type = MGSL_BUS_TYPE_PCI;
8152 info->io_addr_size = 8;
8153	info->irq_flags = IRQF_SHARED;
8154
8155 if (dev->device == 0x0210) {
8156 /* Version 1 PCI9030 based universal PCI adapter */
8157 info->misc_ctrl_value = 0x007c4080;
8158 info->hw_version = 1;
8159 } else {
8160 /* Version 0 PCI9050 based 5V PCI adapter
8161 * A PCI9050 bug prevents reading LCR registers if
8162 * LCR base address bit 7 is set. Maintain shadow
8163 * value so we can write to LCR misc control reg.
8164 */
8165 info->misc_ctrl_value = 0x087e4546;
8166 info->hw_version = 0;
8167 }
8168
8169 mgsl_add_device(info);
8170
8171 return 0;
8172}
8173
8174static void __devexit synclink_remove_one (struct pci_dev *dev)
8175{
8176}
8177