/*
 * linux/drivers/char/synclink.c
 *
 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
 *
 * Device driver for Microgate SyncLink ISA and PCI
 * high speed multiprotocol serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
 *
 * Original release 01/11/99
 *
 * This code is released under the GNU General Public License (GPL)
 *
 * This driver is primarily intended for use in synchronous
 * HDLC mode. Asynchronous mode is also provided.
 *
 * When operating in synchronous mode, each call to mgsl_write()
 * contains exactly one complete HDLC frame. Calling mgsl_put_char
 * will start assembling an HDLC frame that will not be sent until
 * mgsl_flush_chars or mgsl_write is called.
 *
 * Synchronous receive data is reported as complete frames. To accomplish
 * this, the TTY flip buffer is bypassed (too small to hold largest
 * frame and may fragment frames) and the line discipline
 * receive entry point is called directly.
 *
 * This driver has been tested with a slightly modified ppp.c driver
 * for synchronous PPP.
 *
 * 2000/02/16
 * Added interface for syncppp.c driver (an alternate synchronous PPP
 * implementation that also supports Cisco HDLC). Each device instance
 * registers as a tty device AND a network device (if dosyncppp option
 * is set for the device). The functionality is determined by which
 * device interface is opened.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
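
/*
 * Minimal user-space usage sketch for the synchronous HDLC behavior
 * described above. Illustrative only: the device node name, error handling
 * and buffer names are assumptions; MGSL_IOCGPARAMS/MGSL_IOCSPARAMS come
 * from include/linux/synclink.h and N_HDLC gives frame-oriented read/write.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);		// one SyncLink port
 *	int ldisc = N_HDLC;
 *	ioctl(fd, TIOCSETD, &ldisc);			// frame-oriented ldisc
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);		// read current parameters
 *	params.mode = MGSL_MODE_HDLC;			// synchronous HDLC mode
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);		// apply parameters
 *	write(fd, tx_frame, tx_len);			// one call == one HDLC frame
 *	int rx_len = read(fd, rx_frame, sizeof(rx_frame)); // one complete frame
 */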

#if defined(__i386__)
#  define BREAKPOINT() asm(" int $3");
#else
#  define BREAKPOINT() { }
#endif

#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#define MAX_TOTAL_DEVICES 20

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <linux/netdevice.h>

#include <linux/vmalloc.h>
#include <linux/init.h>

#include <linux/delay.h>
#include <linux/ioctl.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/dma-mapping.h>

#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
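
/*
 * Note: these wrappers store their result in 'error' instead of returning
 * it. GET_USER()/PUT_USER() pass through the get_user()/put_user() return
 * value, while COPY_FROM_USER()/COPY_TO_USER() fold a nonzero "bytes not
 * copied" count from copy_from_user()/copy_to_user() into -EFAULT.
 */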

#include <asm/uaccess.h>

#include "linux/synclink.h"

#define RCLRVALUE 0xffff

static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};

#define SHARED_MEM_ADDRESS_SIZE 0x40000
#define BUFFERLISTSIZE 4096
#define DMABUFFERSIZE 4096
#define MAXRXFRAMES 7

typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;
} DMABUFFERENTRY, *DMAPBUFFERENTRY;

/* The queue of BH actions to be performed */

#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

#define IO_PIN_SHUTDOWN_LIMIT 100

#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
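
/*
 * Descriptive note: RELEVANT_IFLAG() masks the termios input flags that
 * influence receiver programming; comparing old and new c_iflag values with
 * this mask is the conventional way serial drivers decide whether a termios
 * change requires reprogramming the hardware (assumption based on that
 * convention, not on a statement elsewhere in this file).
 */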

struct	_input_signal_events {
	int	ri_up;
	int	ri_down;
	int	dsr_up;
	int	dsr_down;
	int	dcd_up;
	int	dcd_down;
	int	cts_up;
	int	cts_down;
};

/* transmit holding buffer definitions*/
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int	buffer_size;
	unsigned char	*buffer;
};


/*
 * Device instance data structure
 */

struct mgsl_struct {
	int			magic;
	int			flags;
	int			count;		/* count of opens */
	int			line;
	int			hw_version;
	unsigned short		close_delay;
	unsigned short		closing_wait;	/* time to wait before closing */

	struct mgsl_icount	icount;

	struct tty_struct 	*tty;
	int			timeout;
	int			x_char;		/* xon/xoff character */
	int			blocked_open;	/* # of blocked opens */
	u16			read_status_mask;
	u16			ignore_status_mask;
	unsigned char 		*xmit_buf;
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;

	wait_queue_head_t	open_wait;
	wait_queue_head_t	close_wait;

	wait_queue_head_t	status_event_wait_q;
	wait_queue_head_t	event_wait_q;
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t irq_spinlock;		/* spinlock for synchronizing with ISR */
	struct work_struct task;		/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;

	int bh_running;			/* Protection from multiple */
	int isr_overflow;
	int bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;

	int num_tx_holding_buffers;	/* number of tx holding buffer allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	int rx_enabled;
	int rx_overflow;
	int rx_rcc_underrun;

	int tx_enabled;
	int tx_active;
	u32 idle_mode;

	u16 cmr_value;
	u16 tcsr_value;

	char device_name[25];		/* device instance name */

	unsigned int bus_type;		/* expansion bus type (ISA,EISA,PCI) */
	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	int io_addr_requested;		/* nonzero if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	int irq_requested;		/* nonzero if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	int dma_requested;		/* nonzero if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	int irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int fDiagnosticsmode;		/* Driver in Diagnostic mode? (DIAGS) */

	u32 last_mem_alloc;
	unsigned char* memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	int shared_mem_requested;

	unsigned char* lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	int lcr_mem_requested;

	u32 misc_ctrl_value;
	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	BOOLEAN drop_rts_on_tx_done;

	BOOLEAN loopmode_insert_requested;
	BOOLEAN loopmode_send_done_requested;

	struct	_input_signal_events	input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	int dosyncppp;
	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};

#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */


#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
#define DATAREG DCPIN + SDPIN	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40

/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
 */
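
/*
 * Usage note: these ordinals are passed to the register access helpers
 * declared later in this file, e.g. usc_OutReg(info, CMR, value) writes
 * the Channel mode Register and usc_InReg(info, CMR) reads it back.
 */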

#define CMR	0x02	/* Channel mode Register */
#define CCSR	0x04	/* Channel Command/status Register */
#define CCR	0x06	/* Channel Control Register */
#define PSR	0x08	/* Port status Register */
#define PCR	0x0a	/* Port Control Register */
#define TMDR	0x0c	/* Test mode Data Register */
#define TMCR	0x0e	/* Test mode Control Register */
#define CMCR	0x10	/* Clock mode Control Register */
#define HCR	0x12	/* Hardware Configuration Register */
#define IVR	0x14	/* Interrupt Vector Register */
#define IOCR	0x16	/* Input/Output Control Register */
#define ICR	0x18	/* Interrupt Control Register */
#define DCCR	0x1a	/* Daisy Chain Control Register */
#define MISR	0x1c	/* Misc Interrupt status Register */
#define SICR	0x1e	/* status Interrupt Control Register */
#define RDR	0x20	/* Receive Data Register */
#define RMR	0x22	/* Receive mode Register */
#define RCSR	0x24	/* Receive Command/status Register */
#define RICR	0x26	/* Receive Interrupt Control Register */
#define RSR	0x28	/* Receive Sync Register */
#define RCLR	0x2a	/* Receive count Limit Register */
#define RCCR	0x2c	/* Receive Character count Register */
#define TC0R	0x2e	/* Time Constant 0 Register */
#define TDR	0x30	/* Transmit Data Register */
#define TMR	0x32	/* Transmit mode Register */
#define TCSR	0x34	/* Transmit Command/status Register */
#define TICR	0x36	/* Transmit Interrupt Control Register */
#define TSR	0x38	/* Transmit Sync Register */
#define TCLR	0x3a	/* Transmit count Limit Register */
#define TCCR	0x3c	/* Transmit Character count Register */
#define TC1R	0x3e	/* Time Constant 1 Register */


/*
 * MACRO DEFINITIONS FOR DMA REGISTERS
 */

#define DCR	0x06	/* DMA Control Register (shared) */
#define DACR	0x08	/* DMA Array count Register (shared) */
#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */
#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */

#define TDMR	0x02	/* Transmit DMA mode Register */
#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
#define TBCR	0x2a	/* Transmit Byte count Register */
#define TARL	0x2c	/* Transmit Address Register (low) */
#define TARU	0x2e	/* Transmit Address Register (high) */
#define NTBCR	0x3a	/* Next Transmit Byte count Register */
#define NTARL	0x3c	/* Next Transmit Address Register (low) */
#define NTARU	0x3e	/* Next Transmit Address Register (high) */

#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
#define RBCR	0xaa	/* Receive Byte count Register */
#define RARL	0xac	/* Receive Address Register (low) */
#define RARU	0xae	/* Receive Address Register (high) */
#define NRBCR	0xba	/* Next Receive Byte count Register */
#define NRARL	0xbc	/* Next Receive Address Register (low) */
#define NRARU	0xbe	/* Next Receive Address Register (high) */


/*
 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 */

#define MODEMSTATUS_DTR 0x80
#define MODEMSTATUS_DSR 0x40
#define MODEMSTATUS_RTS 0x20
#define MODEMSTATUS_CTS 0x10
#define MODEMSTATUS_RI  0x04
#define MODEMSTATUS_DCD 0x01


/*
 * Channel Command/Address Register (CCAR) Command Codes
 */

#define RTCmd_Null			0x0000
#define RTCmd_ResetHighestIus		0x1000
#define RTCmd_TriggerChannelLoadDma	0x2000
#define RTCmd_TriggerRxDma		0x2800
#define RTCmd_TriggerTxDma		0x3000
#define RTCmd_TriggerRxAndTxDma		0x3800
#define RTCmd_PurgeRxFifo		0x4800
#define RTCmd_PurgeTxFifo		0x5000
#define RTCmd_PurgeRxAndTxFifo		0x5800
#define RTCmd_LoadRcc			0x6800
#define RTCmd_LoadTcc			0x7000
#define RTCmd_LoadRccAndTcc		0x7800
#define RTCmd_LoadTC0			0x8800
#define RTCmd_LoadTC1			0x9000
#define RTCmd_LoadTC0AndTC1		0x9800
#define RTCmd_SerialDataLSBFirst	0xa000
#define RTCmd_SerialDataMSBFirst	0xa800
#define RTCmd_SelectBigEndian		0xb000
#define RTCmd_SelectLittleEndian	0xb800


/*
 * DMA Command/Address Register (DCAR) Command Codes
 */

#define DmaCmd_Null			0x0000
#define DmaCmd_ResetTxChannel		0x1000
#define DmaCmd_ResetRxChannel		0x1200
#define DmaCmd_StartTxChannel		0x2000
#define DmaCmd_StartRxChannel		0x2200
#define DmaCmd_ContinueTxChannel	0x3000
#define DmaCmd_ContinueRxChannel	0x3200
#define DmaCmd_PauseTxChannel		0x4000
#define DmaCmd_PauseRxChannel		0x4200
#define DmaCmd_AbortTxChannel		0x5000
#define DmaCmd_AbortRxChannel		0x5200
#define DmaCmd_InitTxChannel		0x7000
#define DmaCmd_InitRxChannel		0x7200
#define DmaCmd_ResetHighestDmaIus	0x8000
#define DmaCmd_ResetAllChannels		0x9000
#define DmaCmd_StartAllChannels		0xa000
#define DmaCmd_ContinueAllChannels	0xb000
#define DmaCmd_PauseAllChannels		0xc000
#define DmaCmd_AbortAllChannels		0xd000
#define DmaCmd_InitAllChannels		0xf000

#define TCmd_Null			0x0000
#define TCmd_ClearTxCRC			0x2000
#define TCmd_SelectTicrTtsaData		0x4000
#define TCmd_SelectTicrTxFifostatus	0x5000
#define TCmd_SelectTicrIntLevel		0x6000
#define TCmd_SelectTicrdma_level	0x7000
#define TCmd_SendFrame			0x8000
#define TCmd_SendAbort			0x9000
#define TCmd_EnableDleInsertion		0xc000
#define TCmd_DisableDleInsertion	0xd000
#define TCmd_ClearEofEom		0xe000
#define TCmd_SetEofEom			0xf000

#define RCmd_Null			0x0000
#define RCmd_ClearRxCRC			0x2000
#define RCmd_EnterHuntmode		0x3000
#define RCmd_SelectRicrRtsaData		0x4000
#define RCmd_SelectRicrRxFifostatus	0x5000
#define RCmd_SelectRicrIntLevel		0x6000
#define RCmd_SelectRicrdma_level	0x7000

/*
 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 */

#define RECEIVE_STATUS		BIT5
#define RECEIVE_DATA		BIT4
#define TRANSMIT_STATUS		BIT3
#define TRANSMIT_DATA		BIT2
#define IO_PIN			BIT1
#define MISC			BIT0


/*
 * Receive status Bits in Receive Command/status Register RCSR
 */

#define RXSTATUS_SHORT_FRAME		BIT8
#define RXSTATUS_CODE_VIOLATION		BIT8
#define RXSTATUS_EXITED_HUNT		BIT7
#define RXSTATUS_IDLE_RECEIVED		BIT6
#define RXSTATUS_BREAK_RECEIVED		BIT5
#define RXSTATUS_ABORT_RECEIVED		BIT5
#define RXSTATUS_RXBOUND		BIT4
#define RXSTATUS_CRC_ERROR		BIT3
#define RXSTATUS_FRAMING_ERROR		BIT3
#define RXSTATUS_ABORT			BIT2
#define RXSTATUS_PARITY_ERROR		BIT2
#define RXSTATUS_OVERRUN		BIT1
#define RXSTATUS_DATA_AVAILABLE		BIT0
#define RXSTATUS_ALL			0x01f6
#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )

/*
 * Values for setting transmit idle mode in
 * Transmit Control/status Register (TCSR)
 */
#define IDLEMODE_FLAGS			0x0000
#define IDLEMODE_ALT_ONE_ZERO		0x0100
#define IDLEMODE_ZERO			0x0200
#define IDLEMODE_ONE			0x0300
#define IDLEMODE_ALT_MARK_SPACE		0x0500
#define IDLEMODE_SPACE			0x0600
#define IDLEMODE_MARK			0x0700
#define IDLEMODE_MASK			0x0700

/*
 * IUSC revision identifiers
 */
#define IUSC_SL1660			0x4d44
#define IUSC_PRE_SL1660			0x4553

/*
 * Transmit status Bits in Transmit Command/status Register (TCSR)
 */

#define TCSR_PRESERVE			0x0F00

#define TCSR_UNDERWAIT			BIT11
#define TXSTATUS_PREAMBLE_SENT		BIT7
#define TXSTATUS_IDLE_SENT		BIT6
#define TXSTATUS_ABORT_SENT		BIT5
#define TXSTATUS_EOF_SENT		BIT4
#define TXSTATUS_EOM_SENT		BIT4
#define TXSTATUS_CRC_SENT		BIT3
#define TXSTATUS_ALL_SENT		BIT2
#define TXSTATUS_UNDERRUN		BIT1
#define TXSTATUS_FIFO_EMPTY		BIT0
#define TXSTATUS_ALL			0x00fa
#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )


#define MISCSTATUS_RXC_LATCHED		BIT15
#define MISCSTATUS_RXC			BIT14
#define MISCSTATUS_TXC_LATCHED		BIT13
#define MISCSTATUS_TXC			BIT12
#define MISCSTATUS_RI_LATCHED		BIT11
#define MISCSTATUS_RI			BIT10
#define MISCSTATUS_DSR_LATCHED		BIT9
#define MISCSTATUS_DSR			BIT8
#define MISCSTATUS_DCD_LATCHED		BIT7
#define MISCSTATUS_DCD			BIT6
#define MISCSTATUS_CTS_LATCHED		BIT5
#define MISCSTATUS_CTS			BIT4
#define MISCSTATUS_RCC_UNDERRUN		BIT3
#define MISCSTATUS_DPLL_NO_SYNC		BIT2
#define MISCSTATUS_BRG1_ZERO		BIT1
#define MISCSTATUS_BRG0_ZERO		BIT0

#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))

#define SICR_RXC_ACTIVE			BIT15
#define SICR_RXC_INACTIVE		BIT14
#define SICR_RXC			(BIT15+BIT14)
#define SICR_TXC_ACTIVE			BIT13
#define SICR_TXC_INACTIVE		BIT12
#define SICR_TXC			(BIT13+BIT12)
#define SICR_RI_ACTIVE			BIT11
#define SICR_RI_INACTIVE		BIT10
#define SICR_RI				(BIT11+BIT10)
#define SICR_DSR_ACTIVE			BIT9
#define SICR_DSR_INACTIVE		BIT8
#define SICR_DSR			(BIT9+BIT8)
#define SICR_DCD_ACTIVE			BIT7
#define SICR_DCD_INACTIVE		BIT6
#define SICR_DCD			(BIT7+BIT6)
#define SICR_CTS_ACTIVE			BIT5
#define SICR_CTS_INACTIVE		BIT4
#define SICR_CTS			(BIT5+BIT4)
#define SICR_RCC_UNDERFLOW		BIT3
#define SICR_DPLL_NO_SYNC		BIT2
#define SICR_BRG1_ZERO			BIT1
#define SICR_BRG0_ZERO			BIT0

void usc_DisableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );

#define usc_EnableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )

#define usc_DisableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )

#define usc_EnableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )

#define usc_DisableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )

#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )

/*
 * Transmit status Bits in Transmit Control status Register (TCSR)
 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 */

#define TXSTATUS_PREAMBLE_SENT	BIT7
#define TXSTATUS_IDLE_SENT	BIT6
#define TXSTATUS_ABORT_SENT	BIT5
#define TXSTATUS_EOF		BIT4
#define TXSTATUS_CRC_SENT	BIT3
#define TXSTATUS_ALL_SENT	BIT2
#define TXSTATUS_UNDERRUN	BIT1
#define TXSTATUS_FIFO_EMPTY	BIT0

#define DICR_MASTER		BIT15
#define DICR_TRANSMIT		BIT0
#define DICR_RECEIVE		BIT1

#define usc_EnableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )

#define usc_DisableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )

#define usc_EnableStatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )

#define usc_DisablestatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )

/* Transmit status Bits in Transmit Control status Register (TCSR) */
/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */


#define DISABLE_UNCONDITIONAL	0
#define DISABLE_END_OF_FRAME	1
#define ENABLE_UNCONDITIONAL	2
#define ENABLE_AUTO_CTS		3
#define ENABLE_AUTO_DCD		3
#define usc_EnableTransmitter(a,b) \
	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
#define usc_EnableReceiver(a,b) \
	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )

static u16  usc_InDmaReg( struct mgsl_struct *info, u16 Port );
static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );

static u16  usc_InReg( struct mgsl_struct *info, u16 Port );
static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
void usc_TCmd( struct mgsl_struct *info, u16 Cmd );

#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))

#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))

static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );

static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );

static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );

static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );

static void usc_reset( struct mgsl_struct *info );

static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );

static void usc_loopback_frame( struct mgsl_struct *info );

static void mgsl_tx_timeout(unsigned long context);


static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int  hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 */

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )

static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);
/*
 * DMA buffer manipulation functions.
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static int  mgsl_get_rx_frame( struct mgsl_struct *info );
static int  mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);

/*
 * DMA and Shared Memory buffer allocation and formatting
 */
static int  mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int  mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int  mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static int load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);

/*
 * Bottom half interrupt handlers
 */
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);

/*
 * Interrupt handler routines and dispatch table.
 */
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );

typedef void (*isr_dispatch_func)(struct mgsl_struct *);

static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};

/*
 * ioctl call handlers
 */
static int tiocmget(struct tty_struct *tty, struct file *file);
static int tiocmset(struct tty_struct *tty, struct file *file,
		    unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
	__user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );

/* set non-zero on successful registration with PCI subsystem */
static int pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static int break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int dosyncppp[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(dosyncppp, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

static struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= __devexit_p(synclink_remove_one),
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgsl_get_text_ptr(void)
{
	return mgsl_get_text_ptr;
}
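
/*
 * Illustrative gdb recipe for the break_on_load hook described above
 * (the module object path and register contents are placeholders):
 *
 *	(gdb) info registers eax		# .text address loaded at module init
 *	(gdb) add-symbol-file synclink.o <eax value>
 *	(gdb) continue
 */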

static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */

static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->receive_buf)
			ld->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running   = 0;
		info->bh_requested = 0;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 *	Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = 1;

	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

static void mgsl_bh_receive(struct mgsl_struct *info)
{
	int (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = FALSE;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

}	/* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 *	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave  */
		/* the TxFifo with data from the aborted frame  */
		/* so purge the TxFifo. Also shutdown the DMA   */
		/* channel in case there is data remaining in   */
		/* the DMA buffer                               */
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	info->tx_active = 0;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = 0;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->tty->stopped || info->tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

}	/* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 *	Service an Input/Output pin interrupt. The type of
 *	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct	mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		if ( (info->flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->tty)
					tty_hangup(info->tty);
			}
		}

		if ( (info->flags & ASYNC_CTS_FLOW) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->tty)
						info->tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->tty)
						info->tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = 1;
	}

}	/* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 *	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	if (info->tty->stopped || info->tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = 0;

	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;

}	/* end of mgsl_isr_transmit_data() */

/* mgsl_isr_receive_data()
 *
 *	Service a receive data interrupt. This occurs
 *	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	int work = 0;
	unsigned char DataByte;
	struct tty_struct *tty = info->tty;
	struct	mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO */

	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		int flag;

		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
		      info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		icount->rx++;

		flag = 0;
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			if (status & RXSTATUS_BREAK_RECEIVED) {
				flag = TTY_BREAK;
				if (info->flags & ASYNC_SAK)
					do_SAK(tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				flag = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				flag = TTY_FRAME;
		} /* end of if (error) */
		tty_insert_flip_char(tty, DataByte, flag);
		if (status & RXSTATUS_OVERRUN) {
			/* Overrun is special, since it's
			 * reported immediately, and doesn't
			 * affect the current character
			 */
			work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		}
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	if(work)
		tty_flip_buffer_push(tty);
}
/* mgsl_isr_misc()
 *
 *	Service a miscellaneous interrupt source.
 *
 * Arguments:		info		pointer to device extension (instance data)
 * Return Value:	None
 */
1558static void mgsl_isr_misc( struct mgsl_struct *info )
1559{
1560 u16 status = usc_InReg( info, MISR );
1561
1562 if ( debug_level >= DEBUG_LEVEL_ISR )
1563 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1564 __FILE__,__LINE__,status);
1565
1566 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1567 (info->params.mode == MGSL_MODE_HDLC)) {
1568
1569 /* turn off receiver and rx DMA */
1570 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1571 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1572 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1573 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1574 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1575
1576 /* schedule BH handler to restart receiver */
1577 info->pending_bh |= BH_RECEIVE;
1578 info->rx_rcc_underrun = 1;
1579 }
1580
1581 usc_ClearIrqPendingBits( info, MISC );
1582 usc_UnlatchMiscstatusBits( info, status );
1583
1584} /* end of mgsl_isr_misc() */
1585
1586/* mgsl_isr_null()
1587 *
1588 * Services undefined interrupt vectors from the
1589 * USC. (hence this function SHOULD never be called)
1590 *
1591 * Arguments: info pointer to device extension (instance data)
1592 * Return Value: None
1593 */
1594static void mgsl_isr_null( struct mgsl_struct *info )
1595{
1596
1597} /* end of mgsl_isr_null() */
1598
1599/* mgsl_isr_receive_dma()
1600 *
1601 * Service a receive DMA channel interrupt.
1602 * For this driver there are two sources of receive DMA interrupts
1603 * as identified in the Receive DMA mode Register (RDMR):
1604 *
1605 * BIT3 EOA/EOL End of List, all receive buffers in receive
1606 * buffer list have been filled (no more free buffers
1607 * available). The DMA controller has shut down.
1608 *
1609 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1610 * DMA buffer is terminated in response to completion
1611 * of a good frame or a frame with errors. The status
1612 * of the frame is stored in the buffer entry in the
1613 * list of receive buffer entries.
1614 *
1615 * Arguments: info pointer to device instance data
1616 * Return Value: None
1617 */
1618static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1619{
1620 u16 status;
1621
1622 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1623 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1624
1625 /* Read the receive DMA status to identify interrupt type. */
1626 /* This also clears the status bits. */
1627 status = usc_InDmaReg( info, RDMR );
1628
1629 if ( debug_level >= DEBUG_LEVEL_ISR )
1630 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1631 __FILE__,__LINE__,info->device_name,status);
1632
1633 info->pending_bh |= BH_RECEIVE;
1634
1635 if ( status & BIT3 ) {
1636 info->rx_overflow = 1;
1637 info->icount.buf_overrun++;
1638 }
1639
1640} /* end of mgsl_isr_receive_dma() */
1641
1642/* mgsl_isr_transmit_dma()
1643 *
1644 * This function services a transmit DMA channel interrupt.
1645 *
1646 * For this driver there is one source of transmit DMA interrupts
1647 * as identified in the Transmit DMA Mode Register (TDMR):
1648 *
1649 * BIT2 EOB End of Buffer. This interrupt occurs when a
1650 * transmit DMA buffer has been emptied.
1651 *
1652 * The driver maintains enough transmit DMA buffers to hold at least
1653 * one max frame size transmit frame. When operating in a buffered
 1654 * transmit mode, there may be enough transmit DMA buffers to hold
 1655 * two or more max frame size frames. On an EOB condition,
1656 * determine if there are any queued transmit buffers and copy into
1657 * transmit DMA buffers if we have room.
1658 *
1659 * Arguments: info pointer to device instance data
1660 * Return Value: None
1661 */
1662static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1663{
1664 u16 status;
1665
1666 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1667 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1668
1669 /* Read the transmit DMA status to identify interrupt type. */
1670 /* This also clears the status bits. */
1671
1672 status = usc_InDmaReg( info, TDMR );
1673
1674 if ( debug_level >= DEBUG_LEVEL_ISR )
1675 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1676 __FILE__,__LINE__,info->device_name,status);
1677
1678 if ( status & BIT2 ) {
1679 --info->tx_dma_buffers_used;
1680
1681 /* if there are transmit frames queued,
1682 * try to load the next one
1683 */
1684 if ( load_next_tx_holding_buffer(info) ) {
1685 /* if call returns non-zero value, we have
1686 * at least one free tx holding buffer
1687 */
1688 info->pending_bh |= BH_TRANSMIT;
1689 }
1690 }
1691
1692} /* end of mgsl_isr_transmit_dma() */
1693
1694/* mgsl_interrupt()
1695 *
1696 * Interrupt service routine entry point.
1697 *
1698 * Arguments:
1699 *
1700 * irq interrupt number that caused interrupt
1701 * dev_id device ID supplied during interrupt registration
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 *
 1703 * Return Value: IRQ_HANDLED if the interrupt was serviced, otherwise IRQ_NONE
1704 */
David Howells7d12e782006-10-05 14:55:46 +01001705static irqreturn_t mgsl_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
1707 struct mgsl_struct * info;
1708 u16 UscVector;
1709 u16 DmaVector;
1710
1711 if ( debug_level >= DEBUG_LEVEL_ISR )
1712 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1713 __FILE__,__LINE__,irq);
1714
1715 info = (struct mgsl_struct *)dev_id;
1716 if (!info)
1717 return IRQ_NONE;
1718
1719 spin_lock(&info->irq_spinlock);
1720
1721 for(;;) {
1722 /* Read the interrupt vectors from hardware. */
1723 UscVector = usc_InReg(info, IVR) >> 9;
1724 DmaVector = usc_InDmaReg(info, DIVR);
1725
1726 if ( debug_level >= DEBUG_LEVEL_ISR )
1727 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1728 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1729
1730 if ( !UscVector && !DmaVector )
1731 break;
1732
1733 /* Dispatch interrupt vector */
1734 if ( UscVector )
1735 (*UscIsrTable[UscVector])(info);
1736 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1737 mgsl_isr_transmit_dma(info);
1738 else
1739 mgsl_isr_receive_dma(info);
1740
1741 if ( info->isr_overflow ) {
1742 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1743 __FILE__,__LINE__,info->device_name, irq);
1744 usc_DisableMasterIrqBit(info);
1745 usc_DisableDmaInterrupts(info,DICR_MASTER);
1746 break;
1747 }
1748 }
1749
1750 /* Request bottom half processing if there's something
1751 * for it to do and the bh is not already running
1752 */
1753
1754 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1755 if ( debug_level >= DEBUG_LEVEL_ISR )
1756 printk("%s(%d):%s queueing bh task.\n",
1757 __FILE__,__LINE__,info->device_name);
1758 schedule_work(&info->task);
1759 info->bh_requested = 1;
1760 }
1761
1762 spin_unlock(&info->irq_spinlock);
1763
1764 if ( debug_level >= DEBUG_LEVEL_ISR )
1765 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1766 __FILE__,__LINE__,irq);
1767 return IRQ_HANDLED;
1768} /* end of mgsl_interrupt() */
1769
1770/* startup()
1771 *
1772 * Initialize and start device.
1773 *
1774 * Arguments: info pointer to device instance data
1775 * Return Value: 0 if success, otherwise error code
1776 */
1777static int startup(struct mgsl_struct * info)
1778{
1779 int retval = 0;
1780
1781 if ( debug_level >= DEBUG_LEVEL_INFO )
1782 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1783
1784 if (info->flags & ASYNC_INITIALIZED)
1785 return 0;
1786
1787 if (!info->xmit_buf) {
1788 /* allocate a page of memory for a transmit buffer */
1789 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1790 if (!info->xmit_buf) {
1791 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1792 __FILE__,__LINE__,info->device_name);
1793 return -ENOMEM;
1794 }
1795 }
1796
1797 info->pending_bh = 0;
1798
Paul Fulghum96612392005-09-09 13:02:13 -07001799 memset(&info->icount, 0, sizeof(info->icount));
1800
Jiri Slaby40565f12007-02-12 00:52:31 -08001801 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
1803 /* Allocate and claim adapter resources */
1804 retval = mgsl_claim_resources(info);
1805
1806 /* perform existence check and diagnostics */
1807 if ( !retval )
1808 retval = mgsl_adapter_test(info);
1809
1810 if ( retval ) {
1811 if (capable(CAP_SYS_ADMIN) && info->tty)
1812 set_bit(TTY_IO_ERROR, &info->tty->flags);
1813 mgsl_release_resources(info);
1814 return retval;
1815 }
1816
1817 /* program hardware for current parameters */
1818 mgsl_change_params(info);
1819
1820 if (info->tty)
1821 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1822
1823 info->flags |= ASYNC_INITIALIZED;
1824
1825 return 0;
1826
1827} /* end of startup() */
1828
1829/* shutdown()
1830 *
1831 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1832 *
1833 * Arguments: info pointer to device instance data
1834 * Return Value: None
1835 */
1836static void shutdown(struct mgsl_struct * info)
1837{
1838 unsigned long flags;
1839
1840 if (!(info->flags & ASYNC_INITIALIZED))
1841 return;
1842
1843 if (debug_level >= DEBUG_LEVEL_INFO)
1844 printk("%s(%d):mgsl_shutdown(%s)\n",
1845 __FILE__,__LINE__, info->device_name );
1846
1847 /* clear status wait queue because status changes */
1848 /* can't happen after shutting down the hardware */
1849 wake_up_interruptible(&info->status_event_wait_q);
1850 wake_up_interruptible(&info->event_wait_q);
1851
Jiri Slaby40565f12007-02-12 00:52:31 -08001852 del_timer_sync(&info->tx_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853
1854 if (info->xmit_buf) {
1855 free_page((unsigned long) info->xmit_buf);
1856 info->xmit_buf = NULL;
1857 }
1858
1859 spin_lock_irqsave(&info->irq_spinlock,flags);
1860 usc_DisableMasterIrqBit(info);
1861 usc_stop_receiver(info);
1862 usc_stop_transmitter(info);
1863 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1864 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1865 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1866
1867 /* Disable DMAEN (Port 7, Bit 14) */
1868 /* This disconnects the DMA request signal from the ISA bus */
1869 /* on the ISA adapter. This has no effect for the PCI adapter */
1870 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1871
1872 /* Disable INTEN (Port 6, Bit12) */
1873 /* This disconnects the IRQ request signal to the ISA bus */
1874 /* on the ISA adapter. This has no effect for the PCI adapter */
1875 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1876
1877 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1878 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1879 usc_set_serial_signals(info);
1880 }
1881
1882 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1883
1884 mgsl_release_resources(info);
1885
1886 if (info->tty)
1887 set_bit(TTY_IO_ERROR, &info->tty->flags);
1888
1889 info->flags &= ~ASYNC_INITIALIZED;
1890
1891} /* end of shutdown() */
1892
1893static void mgsl_program_hw(struct mgsl_struct *info)
1894{
1895 unsigned long flags;
1896
1897 spin_lock_irqsave(&info->irq_spinlock,flags);
1898
1899 usc_stop_receiver(info);
1900 usc_stop_transmitter(info);
1901 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1902
1903 if (info->params.mode == MGSL_MODE_HDLC ||
1904 info->params.mode == MGSL_MODE_RAW ||
1905 info->netcount)
1906 usc_set_sync_mode(info);
1907 else
1908 usc_set_async_mode(info);
1909
1910 usc_set_serial_signals(info);
1911
1912 info->dcd_chkcount = 0;
1913 info->cts_chkcount = 0;
1914 info->ri_chkcount = 0;
1915 info->dsr_chkcount = 0;
1916
1917 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1918 usc_EnableInterrupts(info, IO_PIN);
1919 usc_get_serial_signals(info);
1920
1921 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1922 usc_start_receiver(info);
1923
1924 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1925}
1926
1927/* Reconfigure adapter based on new parameters
1928 */
1929static void mgsl_change_params(struct mgsl_struct *info)
1930{
1931 unsigned cflag;
1932 int bits_per_char;
1933
1934 if (!info->tty || !info->tty->termios)
1935 return;
1936
1937 if (debug_level >= DEBUG_LEVEL_INFO)
1938 printk("%s(%d):mgsl_change_params(%s)\n",
1939 __FILE__,__LINE__, info->device_name );
1940
1941 cflag = info->tty->termios->c_cflag;
1942
1943 /* if B0 rate (hangup) specified then negate DTR and RTS */
1944 /* otherwise assert DTR and RTS */
1945 if (cflag & CBAUD)
1946 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1947 else
1948 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1949
1950 /* byte size and parity */
1951
1952 switch (cflag & CSIZE) {
1953 case CS5: info->params.data_bits = 5; break;
1954 case CS6: info->params.data_bits = 6; break;
1955 case CS7: info->params.data_bits = 7; break;
1956 case CS8: info->params.data_bits = 8; break;
1957 /* Never happens, but GCC is too dumb to figure it out */
1958 default: info->params.data_bits = 7; break;
1959 }
1960
1961 if (cflag & CSTOPB)
1962 info->params.stop_bits = 2;
1963 else
1964 info->params.stop_bits = 1;
1965
1966 info->params.parity = ASYNC_PARITY_NONE;
1967 if (cflag & PARENB) {
1968 if (cflag & PARODD)
1969 info->params.parity = ASYNC_PARITY_ODD;
1970 else
1971 info->params.parity = ASYNC_PARITY_EVEN;
1972#ifdef CMSPAR
1973 if (cflag & CMSPAR)
1974 info->params.parity = ASYNC_PARITY_SPACE;
1975#endif
1976 }
1977
1978 /* calculate number of jiffies to transmit a full
1979 * FIFO (32 bytes) at specified data rate
1980 */
1981 bits_per_char = info->params.data_bits +
1982 info->params.stop_bits + 1;
1983
1984 /* if port data rate is set to 460800 or less then
1985 * allow tty settings to override, otherwise keep the
1986 * current data rate.
1987 */
1988 if (info->params.data_rate <= 460800)
1989 info->params.data_rate = tty_get_baud_rate(info->tty);
1990
1991 if ( info->params.data_rate ) {
1992 info->timeout = (32*HZ*bits_per_char) /
1993 info->params.data_rate;
1994 }
1995 info->timeout += HZ/50; /* Add .02 seconds of slop */
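	/* Worked example (illustrative only, assuming HZ=100 and 8N1
	 * framing): bits_per_char = 8 + 1 + 1 = 10, so at 9600 bps the
	 * 32 byte FIFO needs (32*100*10)/9600 = 3 jiffies (integer
	 * division), plus HZ/50 = 2 jiffies of slop, giving
	 * info->timeout = 5 jiffies (50 ms).
	 */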
1996
1997 if (cflag & CRTSCTS)
1998 info->flags |= ASYNC_CTS_FLOW;
1999 else
2000 info->flags &= ~ASYNC_CTS_FLOW;
2001
2002 if (cflag & CLOCAL)
2003 info->flags &= ~ASYNC_CHECK_CD;
2004 else
2005 info->flags |= ASYNC_CHECK_CD;
2006
2007 /* process tty input control flags */
2008
2009 info->read_status_mask = RXSTATUS_OVERRUN;
2010 if (I_INPCK(info->tty))
2011 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2012 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2013 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2014
2015 if (I_IGNPAR(info->tty))
2016 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2017 if (I_IGNBRK(info->tty)) {
2018 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2019 /* If ignoring parity and break indicators, ignore
2020 * overruns too. (For real raw support).
2021 */
2022 if (I_IGNPAR(info->tty))
2023 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2024 }
2025
2026 mgsl_program_hw(info);
2027
2028} /* end of mgsl_change_params() */
2029
2030/* mgsl_put_char()
2031 *
2032 * Add a character to the transmit buffer.
2033 *
2034 * Arguments: tty pointer to tty information structure
2035 * ch character to add to transmit buffer
2036 *
2037 * Return Value: None
2038 */
2039static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2040{
2041 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2042 unsigned long flags;
2043
2044 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2045 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2046 __FILE__,__LINE__,ch,info->device_name);
2047 }
2048
2049 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2050 return;
2051
2052 if (!tty || !info->xmit_buf)
2053 return;
2054
2055 spin_lock_irqsave(&info->irq_spinlock,flags);
2056
2057 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2058
2059 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2060 info->xmit_buf[info->xmit_head++] = ch;
2061 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2062 info->xmit_cnt++;
2063 }
2064 }
2065
2066 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2067
2068} /* end of mgsl_put_char() */
2069
2070/* mgsl_flush_chars()
2071 *
2072 * Enable transmitter so remaining characters in the
2073 * transmit buffer are sent.
2074 *
2075 * Arguments: tty pointer to tty information structure
2076 * Return Value: None
2077 */
2078static void mgsl_flush_chars(struct tty_struct *tty)
2079{
2080 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2081 unsigned long flags;
2082
2083 if ( debug_level >= DEBUG_LEVEL_INFO )
2084 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2085 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2086
2087 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2088 return;
2089
2090 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2091 !info->xmit_buf)
2092 return;
2093
2094 if ( debug_level >= DEBUG_LEVEL_INFO )
2095 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2096 __FILE__,__LINE__,info->device_name );
2097
2098 spin_lock_irqsave(&info->irq_spinlock,flags);
2099
2100 if (!info->tx_active) {
2101 if ( (info->params.mode == MGSL_MODE_HDLC ||
2102 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2103 /* operating in synchronous (frame oriented) mode */
2104 /* copy data from circular xmit_buf to */
2105 /* transmit DMA buffer. */
2106 mgsl_load_tx_dma_buffer(info,
2107 info->xmit_buf,info->xmit_cnt);
2108 }
2109 usc_start_transmitter(info);
2110 }
2111
2112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2113
2114} /* end of mgsl_flush_chars() */
2115
2116/* mgsl_write()
2117 *
2118 * Send a block of data
2119 *
2120 * Arguments:
2121 *
2122 * tty pointer to tty information structure
2123 * buf pointer to buffer containing send data
2124 * count size of send data in bytes
2125 *
2126 * Return Value: number of characters written
2127 */
2128static int mgsl_write(struct tty_struct * tty,
2129 const unsigned char *buf, int count)
2130{
2131 int c, ret = 0;
2132 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2133 unsigned long flags;
2134
2135 if ( debug_level >= DEBUG_LEVEL_INFO )
2136 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2137 __FILE__,__LINE__,info->device_name,count);
2138
2139 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2140 goto cleanup;
2141
Paul Fulghum86a34142006-03-28 01:56:14 -08002142 if (!tty || !info->xmit_buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 goto cleanup;
2144
2145 if ( info->params.mode == MGSL_MODE_HDLC ||
2146 info->params.mode == MGSL_MODE_RAW ) {
2147 /* operating in synchronous (frame oriented) mode */
2149 if (info->tx_active) {
2150
2151 if ( info->params.mode == MGSL_MODE_HDLC ) {
2152 ret = 0;
2153 goto cleanup;
2154 }
2155 /* transmitter is actively sending data -
2156 * if we have multiple transmit dma and
2157 * holding buffers, attempt to queue this
2158 * frame for transmission at a later time.
2159 */
2160 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2161 /* no tx holding buffers available */
2162 ret = 0;
2163 goto cleanup;
2164 }
2165
2166 /* queue transmit frame request */
2167 ret = count;
2168 save_tx_buffer_request(info,buf,count);
2169
2170 /* if we have sufficient tx dma buffers,
2171 * load the next buffered tx request
2172 */
2173 spin_lock_irqsave(&info->irq_spinlock,flags);
2174 load_next_tx_holding_buffer(info);
2175 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2176 goto cleanup;
2177 }
2178
2179 /* if operating in HDLC LoopMode and the adapter */
2180 /* has yet to be inserted into the loop, we can't */
2181 /* transmit */
2182
2183 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2184 !usc_loopmode_active(info) )
2185 {
2186 ret = 0;
2187 goto cleanup;
2188 }
2189
2190 if ( info->xmit_cnt ) {
 2191 /* Send data accumulated from send_char() calls */
 2192 /* as a frame and wait before accepting more data. */
2193 ret = 0;
2194
2195 /* copy data from circular xmit_buf to */
2196 /* transmit DMA buffer. */
2197 mgsl_load_tx_dma_buffer(info,
2198 info->xmit_buf,info->xmit_cnt);
2199 if ( debug_level >= DEBUG_LEVEL_INFO )
2200 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2201 __FILE__,__LINE__,info->device_name);
2202 } else {
2203 if ( debug_level >= DEBUG_LEVEL_INFO )
2204 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2205 __FILE__,__LINE__,info->device_name);
2206 ret = count;
2207 info->xmit_cnt = count;
2208 mgsl_load_tx_dma_buffer(info,buf,count);
2209 }
2210 } else {
2211 while (1) {
2212 spin_lock_irqsave(&info->irq_spinlock,flags);
2213 c = min_t(int, count,
2214 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2215 SERIAL_XMIT_SIZE - info->xmit_head));
2216 if (c <= 0) {
2217 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2218 break;
2219 }
2220 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2221 info->xmit_head = ((info->xmit_head + c) &
2222 (SERIAL_XMIT_SIZE-1));
2223 info->xmit_cnt += c;
2224 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2225 buf += c;
2226 count -= c;
2227 ret += c;
2228 }
2229 }
2230
2231 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2232 spin_lock_irqsave(&info->irq_spinlock,flags);
2233 if (!info->tx_active)
2234 usc_start_transmitter(info);
2235 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2236 }
2237cleanup:
2238 if ( debug_level >= DEBUG_LEVEL_INFO )
2239 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2240 __FILE__,__LINE__,info->device_name,ret);
2241
2242 return ret;
2243
2244} /* end of mgsl_write() */
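
/* Usage sketch (hypothetical user space code; the device node name is an
 * assumption): in HDLC mode each write() carries exactly one frame, and a
 * second frame is refused (write() returns 0) until the first completes.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	unsigned char frame[128];	// filled with the HDLC information field
 *	int n = write(fd, frame, sizeof(frame));
 *	if (n == 0)
 *		;			// transmitter busy, retry later
 */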
2245
2246/* mgsl_write_room()
2247 *
2248 * Return the count of free bytes in transmit buffer
2249 *
2250 * Arguments: tty pointer to tty info structure
 2251 * Return Value: count of free bytes in transmit buffer
2252 */
2253static int mgsl_write_room(struct tty_struct *tty)
2254{
2255 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2256 int ret;
2257
2258 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2259 return 0;
2260 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2261 if (ret < 0)
2262 ret = 0;
2263
2264 if (debug_level >= DEBUG_LEVEL_INFO)
2265 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2266 __FILE__,__LINE__, info->device_name,ret );
2267
2268 if ( info->params.mode == MGSL_MODE_HDLC ||
2269 info->params.mode == MGSL_MODE_RAW ) {
2270 /* operating in synchronous (frame oriented) mode */
2271 if ( info->tx_active )
2272 return 0;
2273 else
2274 return HDLC_MAX_FRAME_SIZE;
2275 }
2276
2277 return ret;
2278
2279} /* end of mgsl_write_room() */
2280
2281/* mgsl_chars_in_buffer()
2282 *
2283 * Return the count of bytes in transmit buffer
2284 *
2285 * Arguments: tty pointer to tty info structure
 2286 * Return Value: count of bytes remaining in transmit buffer
2287 */
2288static int mgsl_chars_in_buffer(struct tty_struct *tty)
2289{
2290 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2291
2292 if (debug_level >= DEBUG_LEVEL_INFO)
2293 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2294 __FILE__,__LINE__, info->device_name );
2295
2296 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2297 return 0;
2298
2299 if (debug_level >= DEBUG_LEVEL_INFO)
2300 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2301 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2302
2303 if ( info->params.mode == MGSL_MODE_HDLC ||
2304 info->params.mode == MGSL_MODE_RAW ) {
2305 /* operating in synchronous (frame oriented) mode */
2306 if ( info->tx_active )
2307 return info->max_frame_size;
2308 else
2309 return 0;
2310 }
2311
2312 return info->xmit_cnt;
2313} /* end of mgsl_chars_in_buffer() */
2314
2315/* mgsl_flush_buffer()
2316 *
2317 * Discard all data in the send buffer
2318 *
2319 * Arguments: tty pointer to tty info structure
2320 * Return Value: None
2321 */
2322static void mgsl_flush_buffer(struct tty_struct *tty)
2323{
2324 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2325 unsigned long flags;
2326
2327 if (debug_level >= DEBUG_LEVEL_INFO)
2328 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2329 __FILE__,__LINE__, info->device_name );
2330
2331 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2332 return;
2333
2334 spin_lock_irqsave(&info->irq_spinlock,flags);
2335 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2336 del_timer(&info->tx_timer);
2337 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 tty_wakeup(tty);
2340}
2341
2342/* mgsl_send_xchar()
2343 *
2344 * Send a high-priority XON/XOFF character
2345 *
2346 * Arguments: tty pointer to tty info structure
2347 * ch character to send
2348 * Return Value: None
2349 */
2350static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2351{
2352 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2353 unsigned long flags;
2354
2355 if (debug_level >= DEBUG_LEVEL_INFO)
2356 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2357 __FILE__,__LINE__, info->device_name, ch );
2358
2359 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2360 return;
2361
2362 info->x_char = ch;
2363 if (ch) {
2364 /* Make sure transmit interrupts are on */
2365 spin_lock_irqsave(&info->irq_spinlock,flags);
2366 if (!info->tx_enabled)
2367 usc_start_transmitter(info);
2368 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2369 }
2370} /* end of mgsl_send_xchar() */
2371
2372/* mgsl_throttle()
2373 *
2374 * Signal remote device to throttle send data (our receive data)
2375 *
2376 * Arguments: tty pointer to tty info structure
2377 * Return Value: None
2378 */
2379static void mgsl_throttle(struct tty_struct * tty)
2380{
2381 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2382 unsigned long flags;
2383
2384 if (debug_level >= DEBUG_LEVEL_INFO)
2385 printk("%s(%d):mgsl_throttle(%s) entry\n",
2386 __FILE__,__LINE__, info->device_name );
2387
2388 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2389 return;
2390
2391 if (I_IXOFF(tty))
2392 mgsl_send_xchar(tty, STOP_CHAR(tty));
2393
2394 if (tty->termios->c_cflag & CRTSCTS) {
2395 spin_lock_irqsave(&info->irq_spinlock,flags);
2396 info->serial_signals &= ~SerialSignal_RTS;
2397 usc_set_serial_signals(info);
2398 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2399 }
2400} /* end of mgsl_throttle() */
2401
2402/* mgsl_unthrottle()
2403 *
2404 * Signal remote device to stop throttling send data (our receive data)
2405 *
2406 * Arguments: tty pointer to tty info structure
2407 * Return Value: None
2408 */
2409static void mgsl_unthrottle(struct tty_struct * tty)
2410{
2411 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2412 unsigned long flags;
2413
2414 if (debug_level >= DEBUG_LEVEL_INFO)
2415 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2416 __FILE__,__LINE__, info->device_name );
2417
2418 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2419 return;
2420
2421 if (I_IXOFF(tty)) {
2422 if (info->x_char)
2423 info->x_char = 0;
2424 else
2425 mgsl_send_xchar(tty, START_CHAR(tty));
2426 }
2427
2428 if (tty->termios->c_cflag & CRTSCTS) {
2429 spin_lock_irqsave(&info->irq_spinlock,flags);
2430 info->serial_signals |= SerialSignal_RTS;
2431 usc_set_serial_signals(info);
2432 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2433 }
2434
2435} /* end of mgsl_unthrottle() */
2436
2437/* mgsl_get_stats()
2438 *
 2439 * get the current serial statistics (error and transfer counts)
2440 *
2441 * Arguments: info pointer to device instance data
2442 * user_icount pointer to buffer to hold returned stats
2443 *
2444 * Return Value: 0 if success, otherwise error code
2445 */
2446static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2447{
2448 int err;
2449
2450 if (debug_level >= DEBUG_LEVEL_INFO)
2451 printk("%s(%d):mgsl_get_params(%s)\n",
2452 __FILE__,__LINE__, info->device_name);
2453
Paul Fulghum96612392005-09-09 13:02:13 -07002454 if (!user_icount) {
2455 memset(&info->icount, 0, sizeof(info->icount));
2456 } else {
2457 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2458 if (err)
2459 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 }
2461
2462 return 0;
2463
2464} /* end of mgsl_get_stats() */
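
/* Usage sketch (hypothetical user space code): per the logic above, a
 * NULL argument resets the accumulated counts, a non-NULL pointer copies
 * them out.
 *
 *	struct mgsl_icount stats;
 *	ioctl(fd, MGSL_IOCGSTATS, &stats);	// read counters
 *	ioctl(fd, MGSL_IOCGSTATS, 0);		// reset counters
 */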
2465
2466/* mgsl_get_params()
2467 *
2468 * get the current serial parameters information
2469 *
2470 * Arguments: info pointer to device instance data
2471 * user_params pointer to buffer to hold returned params
2472 *
2473 * Return Value: 0 if success, otherwise error code
2474 */
2475static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2476{
2477 int err;
2478 if (debug_level >= DEBUG_LEVEL_INFO)
2479 printk("%s(%d):mgsl_get_params(%s)\n",
2480 __FILE__,__LINE__, info->device_name);
2481
2482 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2483 if (err) {
2484 if ( debug_level >= DEBUG_LEVEL_INFO )
2485 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2486 __FILE__,__LINE__,info->device_name);
2487 return -EFAULT;
2488 }
2489
2490 return 0;
2491
2492} /* end of mgsl_get_params() */
2493
2494/* mgsl_set_params()
2495 *
2496 * set the serial parameters
2497 *
2498 * Arguments:
2499 *
2500 * info pointer to device instance data
2501 * new_params user buffer containing new serial params
2502 *
2503 * Return Value: 0 if success, otherwise error code
2504 */
2505static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2506{
2507 unsigned long flags;
2508 MGSL_PARAMS tmp_params;
2509 int err;
2510
2511 if (debug_level >= DEBUG_LEVEL_INFO)
2512 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2513 info->device_name );
2514 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2515 if (err) {
2516 if ( debug_level >= DEBUG_LEVEL_INFO )
2517 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2518 __FILE__,__LINE__,info->device_name);
2519 return -EFAULT;
2520 }
2521
2522 spin_lock_irqsave(&info->irq_spinlock,flags);
2523 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2524 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2525
2526 mgsl_change_params(info);
2527
2528 return 0;
2529
2530} /* end of mgsl_set_params() */
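
/* Usage sketch (hypothetical user space code; individual MGSL_PARAMS
 * field names are assumptions based on the Microgate header): read the
 * current parameters, modify them, and write them back.
 *
 *	MGSL_PARAMS params;
 *	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *		perror("MGSL_IOCGPARAMS");
 *	params.mode = MGSL_MODE_HDLC;
 *	params.data_rate = 9600;
 *	if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *		perror("MGSL_IOCSPARAMS");
 */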
2531
2532/* mgsl_get_txidle()
2533 *
2534 * get the current transmit idle mode
2535 *
2536 * Arguments: info pointer to device instance data
2537 * idle_mode pointer to buffer to hold returned idle mode
2538 *
2539 * Return Value: 0 if success, otherwise error code
2540 */
2541static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2542{
2543 int err;
2544
2545 if (debug_level >= DEBUG_LEVEL_INFO)
2546 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2547 __FILE__,__LINE__, info->device_name, info->idle_mode);
2548
2549 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2550 if (err) {
2551 if ( debug_level >= DEBUG_LEVEL_INFO )
2552 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2553 __FILE__,__LINE__,info->device_name);
2554 return -EFAULT;
2555 }
2556
2557 return 0;
2558
2559} /* end of mgsl_get_txidle() */
2560
2561/* mgsl_set_txidle() service ioctl to set transmit idle mode
2562 *
2563 * Arguments: info pointer to device instance data
2564 * idle_mode new idle mode
2565 *
2566 * Return Value: 0 if success, otherwise error code
2567 */
2568static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2569{
2570 unsigned long flags;
2571
2572 if (debug_level >= DEBUG_LEVEL_INFO)
2573 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2574 info->device_name, idle_mode );
2575
2576 spin_lock_irqsave(&info->irq_spinlock,flags);
2577 info->idle_mode = idle_mode;
2578 usc_set_txidle( info );
2579 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2580 return 0;
2581
2582} /* end of mgsl_set_txidle() */
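
/* Usage sketch (hypothetical user space code; the idle pattern constant
 * is assumed from the Microgate header):
 *
 *	ioctl(fd, MGSL_IOCSTXIDLE, HDLC_TXIDLE_FLAGS);
 */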
2583
2584/* mgsl_txenable()
2585 *
2586 * enable or disable the transmitter
2587 *
2588 * Arguments:
2589 *
2590 * info pointer to device instance data
2591 * enable 1 = enable, 0 = disable
2592 *
2593 * Return Value: 0 if success, otherwise error code
2594 */
2595static int mgsl_txenable(struct mgsl_struct * info, int enable)
2596{
2597 unsigned long flags;
2598
2599 if (debug_level >= DEBUG_LEVEL_INFO)
2600 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2601 info->device_name, enable);
2602
2603 spin_lock_irqsave(&info->irq_spinlock,flags);
2604 if ( enable ) {
2605 if ( !info->tx_enabled ) {
2606
2607 usc_start_transmitter(info);
2608 /*--------------------------------------------------
2609 * if HDLC/SDLC Loop mode, attempt to insert the
2610 * station in the 'loop' by setting CMR:13. Upon
2611 * receipt of the next GoAhead (RxAbort) sequence,
2612 * the OnLoop indicator (CCSR:7) should go active
2613 * to indicate that we are on the loop
2614 *--------------------------------------------------*/
2615 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2616 usc_loopmode_insert_request( info );
2617 }
2618 } else {
2619 if ( info->tx_enabled )
2620 usc_stop_transmitter(info);
2621 }
2622 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2623 return 0;
2624
2625} /* end of mgsl_txenable() */
2626
2627/* mgsl_txabort() abort transmission of HDLC frame in progress
2628 *
2629 * Arguments: info pointer to device instance data
2630 * Return Value: 0 if success, otherwise error code
2631 */
2632static int mgsl_txabort(struct mgsl_struct * info)
2633{
2634 unsigned long flags;
2635
2636 if (debug_level >= DEBUG_LEVEL_INFO)
2637 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2638 info->device_name);
2639
2640 spin_lock_irqsave(&info->irq_spinlock,flags);
2641 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2642 {
2643 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2644 usc_loopmode_cancel_transmit( info );
2645 else
2646 usc_TCmd(info,TCmd_SendAbort);
2647 }
2648 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2649 return 0;
2650
2651} /* end of mgsl_txabort() */
2652
2653/* mgsl_rxenable() enable or disable the receiver
2654 *
2655 * Arguments: info pointer to device instance data
2656 * enable 1 = enable, 0 = disable
2657 * Return Value: 0 if success, otherwise error code
2658 */
2659static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2660{
2661 unsigned long flags;
2662
2663 if (debug_level >= DEBUG_LEVEL_INFO)
2664 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2665 info->device_name, enable);
2666
2667 spin_lock_irqsave(&info->irq_spinlock,flags);
2668 if ( enable ) {
2669 if ( !info->rx_enabled )
2670 usc_start_receiver(info);
2671 } else {
2672 if ( info->rx_enabled )
2673 usc_stop_receiver(info);
2674 }
2675 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2676 return 0;
2677
2678} /* end of mgsl_rxenable() */
2679
2680/* mgsl_wait_event() wait for specified event to occur
2681 *
2682 * Arguments: info pointer to device instance data
2683 * mask pointer to bitmask of events to wait for
 2684 * Return Value: 0 if successful, with the bit mask updated to
 2685 * reflect the events triggered,
 2686 * otherwise error code
2687 */
2688static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2689{
2690 unsigned long flags;
2691 int s;
2692 int rc=0;
2693 struct mgsl_icount cprev, cnow;
2694 int events;
2695 int mask;
2696 struct _input_signal_events oldsigs, newsigs;
2697 DECLARE_WAITQUEUE(wait, current);
2698
2699 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2700 if (rc) {
2701 return -EFAULT;
2702 }
2703
2704 if (debug_level >= DEBUG_LEVEL_INFO)
2705 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2706 info->device_name, mask);
2707
2708 spin_lock_irqsave(&info->irq_spinlock,flags);
2709
2710 /* return immediately if state matches requested events */
2711 usc_get_serial_signals(info);
2712 s = info->serial_signals;
2713 events = mask &
2714 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2715 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2716 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2717 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2718 if (events) {
2719 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2720 goto exit;
2721 }
2722
2723 /* save current irq counts */
2724 cprev = info->icount;
2725 oldsigs = info->input_signal_events;
2726
2727 /* enable hunt and idle irqs if needed */
2728 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2729 u16 oldreg = usc_InReg(info,RICR);
2730 u16 newreg = oldreg +
2731 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2732 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2733 if (oldreg != newreg)
2734 usc_OutReg(info, RICR, newreg);
2735 }
2736
2737 set_current_state(TASK_INTERRUPTIBLE);
2738 add_wait_queue(&info->event_wait_q, &wait);
2739
2740 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2741
2742
2743 for(;;) {
2744 schedule();
2745 if (signal_pending(current)) {
2746 rc = -ERESTARTSYS;
2747 break;
2748 }
2749
2750 /* get current irq counts */
2751 spin_lock_irqsave(&info->irq_spinlock,flags);
2752 cnow = info->icount;
2753 newsigs = info->input_signal_events;
2754 set_current_state(TASK_INTERRUPTIBLE);
2755 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2756
2757 /* if no change, wait aborted for some reason */
2758 if (newsigs.dsr_up == oldsigs.dsr_up &&
2759 newsigs.dsr_down == oldsigs.dsr_down &&
2760 newsigs.dcd_up == oldsigs.dcd_up &&
2761 newsigs.dcd_down == oldsigs.dcd_down &&
2762 newsigs.cts_up == oldsigs.cts_up &&
2763 newsigs.cts_down == oldsigs.cts_down &&
2764 newsigs.ri_up == oldsigs.ri_up &&
2765 newsigs.ri_down == oldsigs.ri_down &&
2766 cnow.exithunt == cprev.exithunt &&
2767 cnow.rxidle == cprev.rxidle) {
2768 rc = -EIO;
2769 break;
2770 }
2771
2772 events = mask &
2773 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2774 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2775 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2776 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2777 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2778 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2779 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2780 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2781 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2782 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2783 if (events)
2784 break;
2785
2786 cprev = cnow;
2787 oldsigs = newsigs;
2788 }
2789
2790 remove_wait_queue(&info->event_wait_q, &wait);
2791 set_current_state(TASK_RUNNING);
2792
2793 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2794 spin_lock_irqsave(&info->irq_spinlock,flags);
2795 if (!waitqueue_active(&info->event_wait_q)) {
 2796 /* disable exit hunt mode/idle rcvd IRQs */
2797 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2798 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2799 }
2800 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2801 }
2802exit:
2803 if ( rc == 0 )
2804 PUT_USER(rc, events, mask_ptr);
2805
2806 return rc;
2807
2808} /* end of mgsl_wait_event() */
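
/* Usage sketch (hypothetical user space code): the caller passes a mask
 * of MgslEvent_* bits by address; on success the same word is rewritten
 * with the subset of events that actually occurred.
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("events=0x%x\n", events);
 */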
2809
2810static int modem_input_wait(struct mgsl_struct *info,int arg)
2811{
2812 unsigned long flags;
2813 int rc;
2814 struct mgsl_icount cprev, cnow;
2815 DECLARE_WAITQUEUE(wait, current);
2816
2817 /* save current irq counts */
2818 spin_lock_irqsave(&info->irq_spinlock,flags);
2819 cprev = info->icount;
2820 add_wait_queue(&info->status_event_wait_q, &wait);
2821 set_current_state(TASK_INTERRUPTIBLE);
2822 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2823
2824 for(;;) {
2825 schedule();
2826 if (signal_pending(current)) {
2827 rc = -ERESTARTSYS;
2828 break;
2829 }
2830
2831 /* get new irq counts */
2832 spin_lock_irqsave(&info->irq_spinlock,flags);
2833 cnow = info->icount;
2834 set_current_state(TASK_INTERRUPTIBLE);
2835 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2836
2837 /* if no change, wait aborted for some reason */
2838 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2839 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2840 rc = -EIO;
2841 break;
2842 }
2843
2844 /* check for change in caller specified modem input */
2845 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2846 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2847 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2848 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2849 rc = 0;
2850 break;
2851 }
2852
2853 cprev = cnow;
2854 }
2855 remove_wait_queue(&info->status_event_wait_q, &wait);
2856 set_current_state(TASK_RUNNING);
2857 return rc;
2858}
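
/* Usage sketch (hypothetical user space code): TIOCMIWAIT blocks until
 * one of the modem inputs named in the argument changes state.
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0)
 *		;	// DCD or CTS changed; reread the lines with TIOCMGET
 */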
2859
2860/* return the state of the serial control and status signals
2861 */
2862static int tiocmget(struct tty_struct *tty, struct file *file)
2863{
2864 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2865 unsigned int result;
2866 unsigned long flags;
2867
2868 spin_lock_irqsave(&info->irq_spinlock,flags);
2869 usc_get_serial_signals(info);
2870 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2871
2872 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2873 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2874 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2875 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2876 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2877 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2878
2879 if (debug_level >= DEBUG_LEVEL_INFO)
2880 printk("%s(%d):%s tiocmget() value=%08X\n",
2881 __FILE__,__LINE__, info->device_name, result );
2882 return result;
2883}
2884
2885/* set modem control signals (DTR/RTS)
2886 */
2887static int tiocmset(struct tty_struct *tty, struct file *file,
2888 unsigned int set, unsigned int clear)
2889{
2890 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2891 unsigned long flags;
2892
2893 if (debug_level >= DEBUG_LEVEL_INFO)
2894 printk("%s(%d):%s tiocmset(%x,%x)\n",
2895 __FILE__,__LINE__,info->device_name, set, clear);
2896
2897 if (set & TIOCM_RTS)
2898 info->serial_signals |= SerialSignal_RTS;
2899 if (set & TIOCM_DTR)
2900 info->serial_signals |= SerialSignal_DTR;
2901 if (clear & TIOCM_RTS)
2902 info->serial_signals &= ~SerialSignal_RTS;
2903 if (clear & TIOCM_DTR)
2904 info->serial_signals &= ~SerialSignal_DTR;
2905
2906 spin_lock_irqsave(&info->irq_spinlock,flags);
2907 usc_set_serial_signals(info);
2908 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2909
2910 return 0;
2911}
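
/* Usage sketch (hypothetical user space code) for the two entry points
 * above, reached through the standard modem-control ioctls:
 *
 *	int mcr;
 *	ioctl(fd, TIOCMGET, &mcr);		// read RTS/DTR/CTS/DCD/DSR/RI
 *	mcr |= TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMSET, &mcr);		// assert RTS and DTR
 */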
2912
2913/* mgsl_break() Set or clear transmit break condition
2914 *
2915 * Arguments: tty pointer to tty instance data
2916 * break_state -1=set break condition, 0=clear
2917 * Return Value: None
2918 */
2919static void mgsl_break(struct tty_struct *tty, int break_state)
2920{
2921 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2922 unsigned long flags;
2923
2924 if (debug_level >= DEBUG_LEVEL_INFO)
2925 printk("%s(%d):mgsl_break(%s,%d)\n",
2926 __FILE__,__LINE__, info->device_name, break_state);
2927
2928 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2929 return;
2930
2931 spin_lock_irqsave(&info->irq_spinlock,flags);
2932 if (break_state == -1)
2933 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2934 else
2935 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2936 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2937
2938} /* end of mgsl_break() */
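
/* Usage sketch (hypothetical user space code): the tty layer calls
 * mgsl_break() for the standard break ioctls.
 *
 *	ioctl(fd, TIOCSBRK, 0);	// set break condition (break_state = -1)
 *	ioctl(fd, TIOCCBRK, 0);	// clear break condition (break_state = 0)
 */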
2939
2940/* mgsl_ioctl() Service an IOCTL request
2941 *
2942 * Arguments:
2943 *
2944 * tty pointer to tty instance data
2945 * file pointer to associated file object for device
2946 * cmd IOCTL command code
2947 * arg command argument/context
2948 *
2949 * Return Value: 0 if success, otherwise error code
2950 */
2951static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2952 unsigned int cmd, unsigned long arg)
2953{
2954 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2955
2956 if (debug_level >= DEBUG_LEVEL_INFO)
2957 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2958 info->device_name, cmd );
2959
2960 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2961 return -ENODEV;
2962
2963 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2964 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2965 if (tty->flags & (1 << TTY_IO_ERROR))
2966 return -EIO;
2967 }
2968
2969 return mgsl_ioctl_common(info, cmd, arg);
2970}
2971
2972static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2973{
2974 int error;
2975 struct mgsl_icount cnow; /* kernel counter temps */
2976 void __user *argp = (void __user *)arg;
2977 struct serial_icounter_struct __user *p_cuser; /* user space */
2978 unsigned long flags;
2979
2980 switch (cmd) {
2981 case MGSL_IOCGPARAMS:
2982 return mgsl_get_params(info, argp);
2983 case MGSL_IOCSPARAMS:
2984 return mgsl_set_params(info, argp);
2985 case MGSL_IOCGTXIDLE:
2986 return mgsl_get_txidle(info, argp);
2987 case MGSL_IOCSTXIDLE:
2988 return mgsl_set_txidle(info,(int)arg);
2989 case MGSL_IOCTXENABLE:
2990 return mgsl_txenable(info,(int)arg);
2991 case MGSL_IOCRXENABLE:
2992 return mgsl_rxenable(info,(int)arg);
2993 case MGSL_IOCTXABORT:
2994 return mgsl_txabort(info);
2995 case MGSL_IOCGSTATS:
2996 return mgsl_get_stats(info, argp);
2997 case MGSL_IOCWAITEVENT:
2998 return mgsl_wait_event(info, argp);
2999 case MGSL_IOCLOOPTXDONE:
3000 return mgsl_loopmode_send_done(info);
3001 /* Wait for modem input (DCD,RI,DSR,CTS) change
3002 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3003 */
3004 case TIOCMIWAIT:
3005 return modem_input_wait(info,(int)arg);
3006
3007 /*
3008 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3009 * Return: write counters to the user passed counter struct
3010 * NB: both 1->0 and 0->1 transitions are counted except for
3011 * RI where only 0->1 is counted.
3012 */
3013 case TIOCGICOUNT:
3014 spin_lock_irqsave(&info->irq_spinlock,flags);
3015 cnow = info->icount;
3016 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3017 p_cuser = argp;
3018 PUT_USER(error,cnow.cts, &p_cuser->cts);
3019 if (error) return error;
3020 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3021 if (error) return error;
3022 PUT_USER(error,cnow.rng, &p_cuser->rng);
3023 if (error) return error;
3024 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3025 if (error) return error;
3026 PUT_USER(error,cnow.rx, &p_cuser->rx);
3027 if (error) return error;
3028 PUT_USER(error,cnow.tx, &p_cuser->tx);
3029 if (error) return error;
3030 PUT_USER(error,cnow.frame, &p_cuser->frame);
3031 if (error) return error;
3032 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3033 if (error) return error;
3034 PUT_USER(error,cnow.parity, &p_cuser->parity);
3035 if (error) return error;
3036 PUT_USER(error,cnow.brk, &p_cuser->brk);
3037 if (error) return error;
3038 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3039 if (error) return error;
3040 return 0;
3041 default:
3042 return -ENOIOCTLCMD;
3043 }
3044 return 0;
3045}
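
/* Usage sketch (hypothetical user space code) for the TIOCGICOUNT case
 * above: the counters are copied field by field into the caller's
 * struct serial_icounter_struct.
 *
 *	struct serial_icounter_struct ic;
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("rx=%d tx=%d frame=%d overrun=%d\n",
 *			ic.rx, ic.tx, ic.frame, ic.overrun);
 */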
3046
3047/* mgsl_set_termios()
3048 *
3049 * Set new termios settings
3050 *
3051 * Arguments:
3052 *
3053 * tty pointer to tty structure
 3054 * old_termios pointer to the previous termios settings
3055 *
3056 * Return Value: None
3057 */
Alan Cox606d0992006-12-08 02:38:45 -08003058static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059{
3060 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3061 unsigned long flags;
3062
3063 if (debug_level >= DEBUG_LEVEL_INFO)
3064 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3065 tty->driver->name );
3066
3067 /* just return if nothing has changed */
3068 if ((tty->termios->c_cflag == old_termios->c_cflag)
3069 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3070 == RELEVANT_IFLAG(old_termios->c_iflag)))
3071 return;
3072
3073 mgsl_change_params(info);
3074
3075 /* Handle transition to B0 status */
3076 if (old_termios->c_cflag & CBAUD &&
3077 !(tty->termios->c_cflag & CBAUD)) {
3078 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3079 spin_lock_irqsave(&info->irq_spinlock,flags);
3080 usc_set_serial_signals(info);
3081 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3082 }
3083
3084 /* Handle transition away from B0 status */
3085 if (!(old_termios->c_cflag & CBAUD) &&
3086 tty->termios->c_cflag & CBAUD) {
3087 info->serial_signals |= SerialSignal_DTR;
3088 if (!(tty->termios->c_cflag & CRTSCTS) ||
3089 !test_bit(TTY_THROTTLED, &tty->flags)) {
3090 info->serial_signals |= SerialSignal_RTS;
3091 }
3092 spin_lock_irqsave(&info->irq_spinlock,flags);
3093 usc_set_serial_signals(info);
3094 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3095 }
3096
3097 /* Handle turning off CRTSCTS */
3098 if (old_termios->c_cflag & CRTSCTS &&
3099 !(tty->termios->c_cflag & CRTSCTS)) {
3100 tty->hw_stopped = 0;
3101 mgsl_start(tty);
3102 }
3103
3104} /* end of mgsl_set_termios() */
3105
3106/* mgsl_close()
3107 *
3108 * Called when port is closed. Wait for remaining data to be
3109 * sent. Disable port and free resources.
3110 *
3111 * Arguments:
3112 *
3113 * tty pointer to open tty structure
3114 * filp pointer to open file object
3115 *
3116 * Return Value: None
3117 */
3118static void mgsl_close(struct tty_struct *tty, struct file * filp)
3119{
3120 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3121
3122 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3123 return;
3124
3125 if (debug_level >= DEBUG_LEVEL_INFO)
3126 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3127 __FILE__,__LINE__, info->device_name, info->count);
3128
3129 if (!info->count)
3130 return;
3131
3132 if (tty_hung_up_p(filp))
3133 goto cleanup;
3134
3135 if ((tty->count == 1) && (info->count != 1)) {
3136 /*
3137 * tty->count is 1 and the tty structure will be freed.
3138 * info->count should be one in this case.
 3139 * if it's not, correct it so that the port is shut down.
3140 */
3141 printk("mgsl_close: bad refcount; tty->count is 1, "
3142 "info->count is %d\n", info->count);
3143 info->count = 1;
3144 }
3145
3146 info->count--;
3147
3148 /* if at least one open remaining, leave hardware active */
3149 if (info->count)
3150 goto cleanup;
3151
3152 info->flags |= ASYNC_CLOSING;
3153
3154 /* set tty->closing to notify line discipline to
3155 * only process XON/XOFF characters. Only the N_TTY
3156 * discipline appears to use this (ppp does not).
3157 */
3158 tty->closing = 1;
3159
3160 /* wait for transmit data to clear all layers */
3161
3162 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3163 if (debug_level >= DEBUG_LEVEL_INFO)
3164 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3165 __FILE__,__LINE__, info->device_name );
3166 tty_wait_until_sent(tty, info->closing_wait);
3167 }
3168
3169 if (info->flags & ASYNC_INITIALIZED)
3170 mgsl_wait_until_sent(tty, info->timeout);
3171
3172 if (tty->driver->flush_buffer)
3173 tty->driver->flush_buffer(tty);
3174
3175 tty_ldisc_flush(tty);
3176
3177 shutdown(info);
3178
3179 tty->closing = 0;
3180 info->tty = NULL;
3181
3182 if (info->blocked_open) {
3183 if (info->close_delay) {
3184 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3185 }
3186 wake_up_interruptible(&info->open_wait);
3187 }
3188
3189 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3190
3191 wake_up_interruptible(&info->close_wait);
3192
3193cleanup:
3194 if (debug_level >= DEBUG_LEVEL_INFO)
3195 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3196 tty->driver->name, info->count);
3197
3198} /* end of mgsl_close() */
3199
3200/* mgsl_wait_until_sent()
3201 *
3202 * Wait until the transmitter is empty.
3203 *
3204 * Arguments:
3205 *
3206 * tty pointer to tty info structure
3207 * timeout time to wait for send completion
3208 *
3209 * Return Value: None
3210 */
3211static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3212{
3213 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3214 unsigned long orig_jiffies, char_time;
3215
3216 if (!info )
3217 return;
3218
3219 if (debug_level >= DEBUG_LEVEL_INFO)
3220 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3221 __FILE__,__LINE__, info->device_name );
3222
3223 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3224 return;
3225
3226 if (!(info->flags & ASYNC_INITIALIZED))
3227 goto exit;
3228
3229 orig_jiffies = jiffies;
3230
3231 /* Set check interval to 1/5 of estimated time to
3232 * send a character, and make it at least 1. The check
3233 * interval should also be less than the timeout.
3234 * Note: use tight timings here to satisfy the NIST-PCTS.
3235 */
3236
3237 if ( info->params.data_rate ) {
3238 char_time = info->timeout/(32 * 5);
3239 if (!char_time)
3240 char_time++;
3241 } else
3242 char_time = 1;
3243
3244 if (timeout)
3245 char_time = min_t(unsigned long, char_time, timeout);
3246
3247 if ( info->params.mode == MGSL_MODE_HDLC ||
3248 info->params.mode == MGSL_MODE_RAW ) {
3249 while (info->tx_active) {
3250 msleep_interruptible(jiffies_to_msecs(char_time));
3251 if (signal_pending(current))
3252 break;
3253 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3254 break;
3255 }
3256 } else {
3257 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3258 info->tx_enabled) {
3259 msleep_interruptible(jiffies_to_msecs(char_time));
3260 if (signal_pending(current))
3261 break;
3262 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3263 break;
3264 }
3265 }
3266
3267exit:
3268 if (debug_level >= DEBUG_LEVEL_INFO)
3269 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3270 __FILE__,__LINE__, info->device_name );
3271
3272} /* end of mgsl_wait_until_sent() */
3273
3274/* mgsl_hangup()
3275 *
3276 * Called by tty_hangup() when a hangup is signaled.
 3277 * This is the same as closing all open files for the port.
3278 *
3279 * Arguments: tty pointer to associated tty object
3280 * Return Value: None
3281 */
3282static void mgsl_hangup(struct tty_struct *tty)
3283{
3284 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3285
3286 if (debug_level >= DEBUG_LEVEL_INFO)
3287 printk("%s(%d):mgsl_hangup(%s)\n",
3288 __FILE__,__LINE__, info->device_name );
3289
3290 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3291 return;
3292
3293 mgsl_flush_buffer(tty);
3294 shutdown(info);
3295
3296 info->count = 0;
3297 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3298 info->tty = NULL;
3299
3300 wake_up_interruptible(&info->open_wait);
3301
3302} /* end of mgsl_hangup() */
3303
3304/* block_til_ready()
3305 *
3306 * Block the current process until the specified port
3307 * is ready to be opened.
3308 *
3309 * Arguments:
3310 *
3311 * tty pointer to tty info structure
3312 * filp pointer to open file object
3313 * info pointer to device instance data
3314 *
3315 * Return Value: 0 if success, otherwise error code
3316 */
3317static int block_til_ready(struct tty_struct *tty, struct file * filp,
3318 struct mgsl_struct *info)
3319{
3320 DECLARE_WAITQUEUE(wait, current);
3321 int retval;
3322 int do_clocal = 0, extra_count = 0;
3323 unsigned long flags;
3324
3325 if (debug_level >= DEBUG_LEVEL_INFO)
3326 printk("%s(%d):block_til_ready on %s\n",
3327 __FILE__,__LINE__, tty->driver->name );
3328
3329 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3330 /* nonblock mode is set or port is not enabled */
3331 info->flags |= ASYNC_NORMAL_ACTIVE;
3332 return 0;
3333 }
3334
3335 if (tty->termios->c_cflag & CLOCAL)
3336 do_clocal = 1;
3337
3338 /* Wait for carrier detect and the line to become
3339 * free (i.e., not in use by the callout). While we are in
3340 * this loop, info->count is dropped by one, so that
3341 * mgsl_close() knows when to free things. We restore it upon
3342 * exit, either normal or abnormal.
3343 */
3344
3345 retval = 0;
3346 add_wait_queue(&info->open_wait, &wait);
3347
3348 if (debug_level >= DEBUG_LEVEL_INFO)
3349 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3350 __FILE__,__LINE__, tty->driver->name, info->count );
3351
3352 spin_lock_irqsave(&info->irq_spinlock, flags);
3353 if (!tty_hung_up_p(filp)) {
3354 extra_count = 1;
3355 info->count--;
3356 }
3357 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3358 info->blocked_open++;
3359
3360 while (1) {
3361 if (tty->termios->c_cflag & CBAUD) {
3362 spin_lock_irqsave(&info->irq_spinlock,flags);
3363 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3364 usc_set_serial_signals(info);
3365 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3366 }
3367
3368 set_current_state(TASK_INTERRUPTIBLE);
3369
3370 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3371 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3372 -EAGAIN : -ERESTARTSYS;
3373 break;
3374 }
3375
3376 spin_lock_irqsave(&info->irq_spinlock,flags);
3377 usc_get_serial_signals(info);
3378 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3379
3380 if (!(info->flags & ASYNC_CLOSING) &&
3381 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3382 break;
3383 }
3384
3385 if (signal_pending(current)) {
3386 retval = -ERESTARTSYS;
3387 break;
3388 }
3389
3390 if (debug_level >= DEBUG_LEVEL_INFO)
3391 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3392 __FILE__,__LINE__, tty->driver->name, info->count );
3393
3394 schedule();
3395 }
3396
3397 set_current_state(TASK_RUNNING);
3398 remove_wait_queue(&info->open_wait, &wait);
3399
3400 if (extra_count)
3401 info->count++;
3402 info->blocked_open--;
3403
3404 if (debug_level >= DEBUG_LEVEL_INFO)
3405 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3406 __FILE__,__LINE__, tty->driver->name, info->count );
3407
3408 if (!retval)
3409 info->flags |= ASYNC_NORMAL_ACTIVE;
3410
3411 return retval;
3412
3413} /* end of block_til_ready() */
3414
3415/* mgsl_open()
3416 *
3417 * Called when a port is opened. Init and enable port.
3418 * Perform serial-specific initialization for the tty structure.
3419 *
3420 * Arguments: tty pointer to tty info structure
3421 * filp associated file pointer
3422 *
3423 * Return Value: 0 if success, otherwise error code
3424 */
3425static int mgsl_open(struct tty_struct *tty, struct file * filp)
3426{
3427 struct mgsl_struct *info;
3428 int retval, line;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 unsigned long flags;
3430
3431 /* verify range of specified line number */
3432 line = tty->index;
3433 if ((line < 0) || (line >= mgsl_device_count)) {
3434 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3435 __FILE__,__LINE__,line);
3436 return -ENODEV;
3437 }
3438
3439 /* find the info structure for the specified line */
3440 info = mgsl_device_list;
3441 while(info && info->line != line)
3442 info = info->next_device;
3443 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3444 return -ENODEV;
3445
3446 tty->driver_data = info;
3447 info->tty = tty;
3448
3449 if (debug_level >= DEBUG_LEVEL_INFO)
3450 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3451 __FILE__,__LINE__,tty->driver->name, info->count);
3452
3453 /* If port is closing, signal caller to try again */
3454 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3455 if (info->flags & ASYNC_CLOSING)
3456 interruptible_sleep_on(&info->close_wait);
3457 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3458 -EAGAIN : -ERESTARTSYS);
3459 goto cleanup;
3460 }
3461
3462	info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3463
3464 spin_lock_irqsave(&info->netlock, flags);
3465 if (info->netcount) {
3466 retval = -EBUSY;
3467 spin_unlock_irqrestore(&info->netlock, flags);
3468 goto cleanup;
3469 }
3470 info->count++;
3471 spin_unlock_irqrestore(&info->netlock, flags);
3472
3473 if (info->count == 1) {
3474 /* 1st open on this device, init hardware */
3475 retval = startup(info);
3476 if (retval < 0)
3477 goto cleanup;
3478 }
3479
3480 retval = block_til_ready(tty, filp, info);
3481 if (retval) {
3482 if (debug_level >= DEBUG_LEVEL_INFO)
3483 printk("%s(%d):block_til_ready(%s) returned %d\n",
3484 __FILE__,__LINE__, info->device_name, retval);
3485 goto cleanup;
3486 }
3487
3488 if (debug_level >= DEBUG_LEVEL_INFO)
3489 printk("%s(%d):mgsl_open(%s) success\n",
3490 __FILE__,__LINE__, info->device_name);
3491 retval = 0;
3492
3493cleanup:
3494 if (retval) {
3495 if (tty->count == 1)
3496 info->tty = NULL; /* tty layer will release tty struct */
3497 if(info->count)
3498 info->count--;
3499 }
3500
3501 return retval;
3502
3503} /* end of mgsl_open() */
3504
3505/*
3506 * /proc fs routines....
3507 */
3508
3509static inline int line_info(char *buf, struct mgsl_struct *info)
3510{
3511 char stat_buf[30];
3512 int ret;
3513 unsigned long flags;
3514
3515 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3516 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3517 info->device_name, info->io_base, info->irq_level,
3518 info->phys_memory_base, info->phys_lcr_base);
3519 } else {
3520 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3521 info->device_name, info->io_base,
3522 info->irq_level, info->dma_level);
3523 }
3524
3525 /* output current serial signal states */
3526 spin_lock_irqsave(&info->irq_spinlock,flags);
3527 usc_get_serial_signals(info);
3528 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3529
3530 stat_buf[0] = 0;
3531 stat_buf[1] = 0;
3532 if (info->serial_signals & SerialSignal_RTS)
3533 strcat(stat_buf, "|RTS");
3534 if (info->serial_signals & SerialSignal_CTS)
3535 strcat(stat_buf, "|CTS");
3536 if (info->serial_signals & SerialSignal_DTR)
3537 strcat(stat_buf, "|DTR");
3538 if (info->serial_signals & SerialSignal_DSR)
3539 strcat(stat_buf, "|DSR");
3540 if (info->serial_signals & SerialSignal_DCD)
3541 strcat(stat_buf, "|CD");
3542 if (info->serial_signals & SerialSignal_RI)
3543 strcat(stat_buf, "|RI");
3544
3545 if (info->params.mode == MGSL_MODE_HDLC ||
3546 info->params.mode == MGSL_MODE_RAW ) {
3547 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3548 info->icount.txok, info->icount.rxok);
3549 if (info->icount.txunder)
3550 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3551 if (info->icount.txabort)
3552 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3553 if (info->icount.rxshort)
3554 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3555 if (info->icount.rxlong)
3556 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3557 if (info->icount.rxover)
3558 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3559 if (info->icount.rxcrc)
3560 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3561 } else {
3562 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3563 info->icount.tx, info->icount.rx);
3564 if (info->icount.frame)
3565 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3566 if (info->icount.parity)
3567 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3568 if (info->icount.brk)
3569 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3570 if (info->icount.overrun)
3571 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3572 }
3573
3574 /* Append serial signal status to end */
3575 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3576
3577 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3578 info->tx_active,info->bh_requested,info->bh_running,
3579 info->pending_bh);
3580
3581 spin_lock_irqsave(&info->irq_spinlock,flags);
3582 {
3583 u16 Tcsr = usc_InReg( info, TCSR );
3584 u16 Tdmr = usc_InDmaReg( info, TDMR );
3585 u16 Ticr = usc_InReg( info, TICR );
3586 u16 Rscr = usc_InReg( info, RCSR );
3587 u16 Rdmr = usc_InDmaReg( info, RDMR );
3588 u16 Ricr = usc_InReg( info, RICR );
3589 u16 Icr = usc_InReg( info, ICR );
3590 u16 Dccr = usc_InReg( info, DCCR );
3591 u16 Tmr = usc_InReg( info, TMR );
3592 u16 Tccr = usc_InReg( info, TCCR );
3593 u16 Ccar = inw( info->io_base + CCAR );
3594 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3595 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3596 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3597 }
3598 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3599
3600 return ret;
3601
3602} /* end of line_info() */
3603
3604/* mgsl_read_proc()
3605 *
3606 * Called to print information about devices
3607 *
3608 * Arguments:
3609 * page page of memory to hold returned info
3610 *	start	where the returned data begins within page
3611 *	off	offset into the virtual file being read
3612 *	count	maximum number of bytes to return
3613 *	eof	set to 1 when all device info has been output
3614 *	data	unused
3615 *
3616 * Return Value: number of bytes of device info placed in page
3617 */
3618static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3619 int *eof, void *data)
3620{
3621 int len = 0, l;
3622 off_t begin = 0;
3623 struct mgsl_struct *info;
3624
3625 len += sprintf(page, "synclink driver:%s\n", driver_version);
3626
3627 info = mgsl_device_list;
3628 while( info ) {
3629 l = line_info(page + len, info);
3630 len += l;
3631 if (len+begin > off+count)
3632 goto done;
3633 if (len+begin < off) {
3634 begin += len;
3635 len = 0;
3636 }
3637 info = info->next_device;
3638 }
3639
3640 *eof = 1;
3641done:
3642 if (off >= len+begin)
3643 return 0;
3644 *start = page + (off-begin);
3645 return ((count < begin+len-off) ? count : begin+len-off);
3646
3647} /* end of mgsl_read_proc() */
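/*
 * Note on the legacy read_proc bookkeeping above (descriptive only):
 * "begin" counts output that precedes the window the reader asked for.
 * For example, with off=0 and count=3000, if the first device's
 * line_info() adds 500 bytes then len+begin (500) is still below
 * off+count, so the loop moves on to the next device; only once the
 * requested window is covered does the code jump to "done" and return
 * the slice starting at *start.
 */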
3648
3649/* mgsl_allocate_dma_buffers()
3650 *
3651 * Allocate and format DMA buffers (ISA adapter)
3652 * or format shared memory buffers (PCI adapter).
3653 *
3654 * Arguments: info pointer to device instance data
3655 * Return Value: 0 if success, otherwise error
3656 */
3657static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3658{
3659 unsigned short BuffersPerFrame;
3660
3661 info->last_mem_alloc = 0;
3662
3663 /* Calculate the number of DMA buffers necessary to hold the */
3664 /* largest allowable frame size. Note: If the max frame size is */
3665 /* not an even multiple of the DMA buffer size then we need to */
3666 /* round the buffer count per frame up one. */
3667
3668 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3669 if ( info->max_frame_size % DMABUFFERSIZE )
3670 BuffersPerFrame++;
3671
3672 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3673 /*
3674 * The PCI adapter has 256KBytes of shared memory to use.
3675 * This is 64 PAGE_SIZE buffers.
3676 *
3677 * The first page is used for padding at this time so the
3678 * buffer list does not begin at offset 0 of the PCI
3679 * adapter's shared memory.
3680 *
3681 * The 2nd page is used for the buffer list. A 4K buffer
3682 * list can hold 128 DMA_BUFFER structures at 32 bytes
3683 * each.
3684 *
3685 * This leaves 62 4K pages.
3686 *
3687 * The next N pages are used for transmit frame(s). We
3688 * reserve enough 4K page blocks to hold the required
3689 * number of transmit dma buffers (num_tx_dma_buffers),
3690 * each of MaxFrameSize size.
3691 *
3692 * Of the remaining pages (62-N), determine how many can
3693 * be used to receive full MaxFrameSize inbound frames
3694 */
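		/*
		 * Illustrative example (assumes the driver defaults of
		 * max_frame_size = 4096, num_tx_dma_buffers = 1 and a
		 * DMABUFFERSIZE of 4096): BuffersPerFrame = 1, so transmit
		 * frames consume 1 of the 62 usable pages and
		 * rx_buffer_count becomes 62 - 1 = 61 receive buffers.
		 */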
3695 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3696 info->rx_buffer_count = 62 - info->tx_buffer_count;
3697 } else {
3698 /* Calculate the number of PAGE_SIZE buffers needed for */
3699 /* receive and transmit DMA buffers. */
3700
3701
3702 /* Calculate the number of DMA buffers necessary to */
3703 /* hold 7 max size receive frames and one max size transmit frame. */
3704 /* The receive buffer count is bumped by one so we avoid an */
3705 /* End of List condition if all receive buffers are used when */
3706 /* using linked list DMA buffers. */
3707
3708 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3709 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3710
3711 /*
3712 * limit total TxBuffers & RxBuffers to 62 4K total
3713 * (ala PCI Allocation)
3714 */
3715
3716 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3717 info->rx_buffer_count = 62 - info->tx_buffer_count;
3718
3719 }
3720
3721 if ( debug_level >= DEBUG_LEVEL_INFO )
3722 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3723 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3724
3725 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3726 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3727 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3728 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3729 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3730 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3731 return -ENOMEM;
3732 }
3733
3734 mgsl_reset_rx_dma_buffers( info );
3735 mgsl_reset_tx_dma_buffers( info );
3736
3737 return 0;
3738
3739} /* end of mgsl_allocate_dma_buffers() */
3740
3741/*
3742 * mgsl_alloc_buffer_list_memory()
3743 *
3744 * Allocate a common DMA buffer for use as the
3745 * receive and transmit buffer lists.
3746 *
3747 * A buffer list is a set of buffer entries where each entry contains
3748 * a pointer to an actual buffer and a pointer to the next buffer entry
3749 * (plus some other info about the buffer).
3750 *
3751 * The buffer entries for a list are built to form a circular list so
3752 * that when the entire list has been traversed you start back at the
3753 * beginning.
3754 *
3755 * This function allocates memory for just the buffer entries.
3756 * The links (pointer to next entry) are filled in with the physical
3757 * address of the next entry so the adapter can navigate the list
3758 * using bus master DMA. The pointers to the actual buffers are filled
3759 * out later when the actual buffers are allocated.
3760 *
3761 * Arguments: info pointer to device instance data
3762 * Return Value: 0 if success, otherwise error
3763 */
3764static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3765{
3766 unsigned int i;
3767
3768 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3769 /* PCI adapter uses shared memory. */
3770 info->buffer_list = info->memory_base + info->last_mem_alloc;
3771 info->buffer_list_phys = info->last_mem_alloc;
3772 info->last_mem_alloc += BUFFERLISTSIZE;
3773 } else {
3774 /* ISA adapter uses system memory. */
3775 /* The buffer lists are allocated as a common buffer that both */
3776 /* the processor and adapter can access. This allows the driver to */
3777 /* inspect portions of the buffer while other portions are being */
3778 /* updated by the adapter using Bus Master DMA. */
3779
3780		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3781 if (info->buffer_list == NULL)
3782			return -ENOMEM;
3783		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3784	}
3785
3786 /* We got the memory for the buffer entry lists. */
3787 /* Initialize the memory block to all zeros. */
3788 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3789
3790 /* Save virtual address pointers to the receive and */
3791 /* transmit buffer lists. (Receive 1st). These pointers will */
3792 /* be used by the processor to access the lists. */
3793 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3794 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3795 info->tx_buffer_list += info->rx_buffer_count;
3796
3797 /*
3798 * Build the links for the buffer entry lists such that
3799 * two circular lists are built. (Transmit and Receive).
3800 *
3801 * Note: the links are physical addresses
3802 * which are read by the adapter to determine the next
3803 * buffer entry to use.
3804 */
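	/*
	 * Illustrative sketch: with a hypothetical rx_buffer_count of 3,
	 * the loop below produces
	 *	entry 0 -> buffer_list_phys + 1 * sizeof(DMABUFFERENTRY)
	 *	entry 1 -> buffer_list_phys + 2 * sizeof(DMABUFFERENTRY)
	 *	entry 2 -> buffer_list_phys (wraps back to entry 0)
	 * so the adapter can follow the links indefinitely without help
	 * from the processor.
	 */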
3805
3806 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3807 /* calculate and store physical address of this buffer entry */
3808 info->rx_buffer_list[i].phys_entry =
3809 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3810
3811 /* calculate and store physical address of */
3812		/* next entry in circular list of entries */
3813
3814 info->rx_buffer_list[i].link = info->buffer_list_phys;
3815
3816 if ( i < info->rx_buffer_count - 1 )
3817 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3818 }
3819
3820 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3821 /* calculate and store physical address of this buffer entry */
3822 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3823 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3824
3825 /* calculate and store physical address of */
3826		/* next entry in circular list of entries */
3827
3828 info->tx_buffer_list[i].link = info->buffer_list_phys +
3829 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3830
3831 if ( i < info->tx_buffer_count - 1 )
3832 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3833 }
3834
3835 return 0;
3836
3837} /* end of mgsl_alloc_buffer_list_memory() */
3838
3839/* Free DMA buffers allocated for use as the
3840 * receive and transmit buffer lists.
3841 * Warning:
3842 *
3843 * The data transfer buffers associated with the buffer list
3844 * MUST be freed before freeing the buffer list itself because
3845 * the buffer list contains the information necessary to free
3846 * the individual buffers!
3847 */
3848static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3849{
3850	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3851 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3852
3853 info->buffer_list = NULL;
3854 info->rx_buffer_list = NULL;
3855 info->tx_buffer_list = NULL;
3856
3857} /* end of mgsl_free_buffer_list_memory() */
3858
3859/*
3860 * mgsl_alloc_frame_memory()
3861 *
3862 * Allocate the frame DMA buffers used by the specified buffer list.
3863 * Each DMA buffer will be one memory page in size. This is necessary
3864 * because memory can fragment enough that it may be impossible
3865 * to allocate multiple contiguous pages.
3866 *
3867 * Arguments:
3868 *
3869 * info pointer to device instance data
3870 * BufferList pointer to list of buffer entries
3871 * Buffercount count of buffer entries in buffer list
3872 *
3873 * Return Value: 0 if success, otherwise -ENOMEM
3874 */
3875static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3876{
3877 int i;
3878	u32 phys_addr;
3879
3880 /* Allocate page sized buffers for the receive buffer list */
3881
3882 for ( i = 0; i < Buffercount; i++ ) {
3883 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3884 /* PCI adapter uses shared memory buffers. */
3885 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3886 phys_addr = info->last_mem_alloc;
3887 info->last_mem_alloc += DMABUFFERSIZE;
3888 } else {
3889 /* ISA adapter uses system memory. */
3890			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3891 if (BufferList[i].virt_addr == NULL)
3892				return -ENOMEM;
3893			phys_addr = (u32)(BufferList[i].dma_addr);
3894		}
3895 BufferList[i].phys_addr = phys_addr;
3896 }
3897
3898 return 0;
3899
3900} /* end of mgsl_alloc_frame_memory() */
3901
3902/*
3903 * mgsl_free_frame_memory()
3904 *
3905 * Free the buffers associated with
3906 * each buffer entry of a buffer list.
3907 *
3908 * Arguments:
3909 *
3910 * info pointer to device instance data
3911 * BufferList pointer to list of buffer entries
3912 * Buffercount count of buffer entries in buffer list
3913 *
3914 * Return Value: None
3915 */
3916static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3917{
3918 int i;
3919
3920 if ( BufferList ) {
3921 for ( i = 0 ; i < Buffercount ; i++ ) {
3922 if ( BufferList[i].virt_addr ) {
3923 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3924				dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3925				BufferList[i].virt_addr = NULL;
3926 }
3927 }
3928 }
3929
3930} /* end of mgsl_free_frame_memory() */
3931
3932/* mgsl_free_dma_buffers()
3933 *
3934 * Free DMA buffers
3935 *
3936 * Arguments: info pointer to device instance data
3937 * Return Value: None
3938 */
3939static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3940{
3941 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3942 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3943 mgsl_free_buffer_list_memory( info );
3944
3945} /* end of mgsl_free_dma_buffers() */
3946
3947
3948/*
3949 * mgsl_alloc_intermediate_rxbuffer_memory()
3950 *
3951 * Allocate a buffer large enough to hold max_frame_size. This buffer
3952 * is used to pass an assembled frame to the line discipline.
3953 *
3954 * Arguments:
3955 *
3956 * info pointer to device instance data
3957 *
3958 * Return Value: 0 if success, otherwise -ENOMEM
3959 */
3960static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3961{
3962 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3963 if ( info->intermediate_rxbuffer == NULL )
3964 return -ENOMEM;
3965
3966 return 0;
3967
3968} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3969
3970/*
3971 * mgsl_free_intermediate_rxbuffer_memory()
3972 *
3973 *
3974 * Arguments:
3975 *
3976 * info pointer to device instance data
3977 *
3978 * Return Value: None
3979 */
3980static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3981{
3982	kfree(info->intermediate_rxbuffer);
3983	info->intermediate_rxbuffer = NULL;
3984
3985} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3986
3987/*
3988 * mgsl_alloc_intermediate_txbuffer_memory()
3989 *
3990 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3991 * These buffers hold queued transmit frames until there is sufficient space in
3992 * the adapter's dma transfer buffers to load them.
3993 *
3994 * Arguments:
3995 *
3996 * info pointer to device instance data
3997 *
3998 * Return Value: 0 if success, otherwise -ENOMEM
3999 */
4000static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4001{
4002 int i;
4003
4004 if ( debug_level >= DEBUG_LEVEL_INFO )
4005 printk("%s %s(%d) allocating %d tx holding buffers\n",
4006 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4007
4008 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4009
4010 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4011 info->tx_holding_buffers[i].buffer =
4012 kmalloc(info->max_frame_size, GFP_KERNEL);
4013 if ( info->tx_holding_buffers[i].buffer == NULL )
4014 return -ENOMEM;
4015 }
4016
4017 return 0;
4018
4019} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4020
4021/*
4022 * mgsl_free_intermediate_txbuffer_memory()
4023 *
4024 *
4025 * Arguments:
4026 *
4027 * info pointer to device instance data
4028 *
4029 * Return Value: None
4030 */
4031static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4032{
4033 int i;
4034
4035 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4036		kfree(info->tx_holding_buffers[i].buffer);
4037 info->tx_holding_buffers[i].buffer = NULL;
4038	}
4039
4040 info->get_tx_holding_index = 0;
4041 info->put_tx_holding_index = 0;
4042 info->tx_holding_count = 0;
4043
4044} /* end of mgsl_free_intermediate_txbuffer_memory() */
4045
4046
4047/*
4048 * load_next_tx_holding_buffer()
4049 *
4050 * attempts to load the next buffered tx request into the
4051 * tx dma buffers
4052 *
4053 * Arguments:
4054 *
4055 * info pointer to device instance data
4056 *
4057 * Return Value: 1 if next buffered tx request loaded
4058 * into adapter's tx dma buffer,
4059 * 0 otherwise
4060 */
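/*
 * Illustrative example of the arithmetic below (assumes DMABUFFERSIZE
 * is 4096 as used elsewhere in this driver): a queued frame of 10000
 * bytes needs 10000/4096 = 2 buffers plus 1 more for the 1808 byte
 * remainder, so num_needed = 3 and the frame is loaded only when at
 * least 3 transmit dma buffers are free.
 */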
4061static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4062{
4063 int ret = 0;
4064
4065 if ( info->tx_holding_count ) {
4066 /* determine if we have enough tx dma buffers
4067 * to accommodate the next tx frame
4068 */
4069 struct tx_holding_buffer *ptx =
4070 &info->tx_holding_buffers[info->get_tx_holding_index];
4071 int num_free = num_free_tx_dma_buffers(info);
4072 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4073 if ( ptx->buffer_size % DMABUFFERSIZE )
4074 ++num_needed;
4075
4076 if (num_needed <= num_free) {
4077 info->xmit_cnt = ptx->buffer_size;
4078 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4079
4080 --info->tx_holding_count;
4081 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4082 info->get_tx_holding_index=0;
4083
4084 /* restart transmit timer */
4085 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4086
4087 ret = 1;
4088 }
4089 }
4090
4091 return ret;
4092}
4093
4094/*
4095 * save_tx_buffer_request()
4096 *
4097 * attempt to store transmit frame request for later transmission
4098 *
4099 * Arguments:
4100 *
4101 * info pointer to device instance data
4102 * Buffer pointer to buffer containing frame to load
4103 * BufferSize size in bytes of frame in Buffer
4104 *
4105 * Return Value: 1 if able to store, 0 otherwise
4106 */
4107static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4108{
4109 struct tx_holding_buffer *ptx;
4110
4111 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4112 return 0; /* all buffers in use */
4113 }
4114
4115 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4116 ptx->buffer_size = BufferSize;
4117 memcpy( ptx->buffer, Buffer, BufferSize);
4118
4119 ++info->tx_holding_count;
4120 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4121 info->put_tx_holding_index=0;
4122
4123 return 1;
4124}
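/*
 * Descriptive note: the holding buffers form a simple ring. With a
 * hypothetical num_tx_holding_buffers of 3, put_tx_holding_index
 * advances 0,1,2,0,... as frames are saved here, get_tx_holding_index
 * follows the same sequence as load_next_tx_holding_buffer() drains
 * them, and tx_holding_count tracks how many slots are in use.
 */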
4125
4126static int mgsl_claim_resources(struct mgsl_struct *info)
4127{
4128 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4129 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4130 __FILE__,__LINE__,info->device_name, info->io_base);
4131 return -ENODEV;
4132 }
4133 info->io_addr_requested = 1;
4134
4135 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4136 info->device_name, info ) < 0 ) {
4137		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4138 __FILE__,__LINE__,info->device_name, info->irq_level );
4139 goto errout;
4140 }
4141 info->irq_requested = 1;
4142
4143 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4144 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4145 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4146 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4147 goto errout;
4148 }
4149 info->shared_mem_requested = 1;
4150 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4151 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4152 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4153 goto errout;
4154 }
4155 info->lcr_mem_requested = 1;
4156
4157 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4158 if (!info->memory_base) {
4159			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4160 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4161 goto errout;
4162 }
4163
4164 if ( !mgsl_memory_test(info) ) {
4165 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4166 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4167 goto errout;
4168 }
4169
4170 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4171 if (!info->lcr_base) {
4172			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4173 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4174 goto errout;
4175 }
4176
4177 } else {
4178 /* claim DMA channel */
4179
4180 if (request_dma(info->dma_level,info->device_name) < 0){
4181			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4182 __FILE__,__LINE__,info->device_name, info->dma_level );
4183 mgsl_release_resources( info );
4184 return -ENODEV;
4185 }
4186 info->dma_requested = 1;
4187
4188 /* ISA adapter uses bus master DMA */
4189 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4190 enable_dma(info->dma_level);
4191 }
4192
4193 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4194		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4195 __FILE__,__LINE__,info->device_name, info->dma_level );
4196 goto errout;
4197 }
4198
4199 return 0;
4200errout:
4201 mgsl_release_resources(info);
4202 return -ENODEV;
4203
4204} /* end of mgsl_claim_resources() */
4205
4206static void mgsl_release_resources(struct mgsl_struct *info)
4207{
4208 if ( debug_level >= DEBUG_LEVEL_INFO )
4209 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4210 __FILE__,__LINE__,info->device_name );
4211
4212 if ( info->irq_requested ) {
4213 free_irq(info->irq_level, info);
4214 info->irq_requested = 0;
4215 }
4216 if ( info->dma_requested ) {
4217 disable_dma(info->dma_level);
4218 free_dma(info->dma_level);
4219 info->dma_requested = 0;
4220 }
4221 mgsl_free_dma_buffers(info);
4222 mgsl_free_intermediate_rxbuffer_memory(info);
4223 mgsl_free_intermediate_txbuffer_memory(info);
4224
4225 if ( info->io_addr_requested ) {
4226 release_region(info->io_base,info->io_addr_size);
4227 info->io_addr_requested = 0;
4228 }
4229 if ( info->shared_mem_requested ) {
4230 release_mem_region(info->phys_memory_base,0x40000);
4231 info->shared_mem_requested = 0;
4232 }
4233 if ( info->lcr_mem_requested ) {
4234 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4235 info->lcr_mem_requested = 0;
4236 }
4237 if (info->memory_base){
4238 iounmap(info->memory_base);
4239 info->memory_base = NULL;
4240 }
4241 if (info->lcr_base){
4242 iounmap(info->lcr_base - info->lcr_offset);
4243 info->lcr_base = NULL;
4244 }
4245
4246 if ( debug_level >= DEBUG_LEVEL_INFO )
4247 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4248 __FILE__,__LINE__,info->device_name );
4249
4250} /* end of mgsl_release_resources() */
4251
4252/* mgsl_add_device()
4253 *
4254 * Add the specified device instance data structure to the
4255 * global linked list of devices and increment the device count.
4256 *
4257 * Arguments: info pointer to device instance data
4258 * Return Value: None
4259 */
4260static void mgsl_add_device( struct mgsl_struct *info )
4261{
4262 info->next_device = NULL;
4263 info->line = mgsl_device_count;
4264 sprintf(info->device_name,"ttySL%d",info->line);
4265
4266 if (info->line < MAX_TOTAL_DEVICES) {
4267 if (maxframe[info->line])
4268 info->max_frame_size = maxframe[info->line];
4269 info->dosyncppp = dosyncppp[info->line];
4270
4271 if (txdmabufs[info->line]) {
4272 info->num_tx_dma_buffers = txdmabufs[info->line];
4273 if (info->num_tx_dma_buffers < 1)
4274 info->num_tx_dma_buffers = 1;
4275 }
4276
4277 if (txholdbufs[info->line]) {
4278 info->num_tx_holding_buffers = txholdbufs[info->line];
4279 if (info->num_tx_holding_buffers < 1)
4280 info->num_tx_holding_buffers = 1;
4281 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4282 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4283 }
4284 }
4285
4286 mgsl_device_count++;
4287
4288 if ( !mgsl_device_list )
4289 mgsl_device_list = info;
4290 else {
4291 struct mgsl_struct *current_dev = mgsl_device_list;
4292 while( current_dev->next_device )
4293 current_dev = current_dev->next_device;
4294 current_dev->next_device = info;
4295 }
4296
4297 if ( info->max_frame_size < 4096 )
4298 info->max_frame_size = 4096;
4299 else if ( info->max_frame_size > 65535 )
4300 info->max_frame_size = 65535;
4301
4302 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4303 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4304 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4305 info->phys_memory_base, info->phys_lcr_base,
4306 info->max_frame_size );
4307 } else {
4308 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4309 info->device_name, info->io_base, info->irq_level, info->dma_level,
4310 info->max_frame_size );
4311 }
4312
4313#if SYNCLINK_GENERIC_HDLC
4314	hdlcdev_init(info);
4315#endif
4316
4317} /* end of mgsl_add_device() */
4318
4319/* mgsl_allocate_device()
4320 *
4321 * Allocate and initialize a device instance structure
4322 *
4323 * Arguments: none
4324 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4325 */
4326static struct mgsl_struct* mgsl_allocate_device(void)
4327{
4328 struct mgsl_struct *info;
4329
4330	info = kmalloc(sizeof(struct mgsl_struct),
4331		 GFP_KERNEL);
4332
4333 if (!info) {
4334 printk("Error can't allocate device instance data\n");
4335 } else {
4336 memset(info, 0, sizeof(struct mgsl_struct));
4337 info->magic = MGSL_MAGIC;
4338		INIT_WORK(&info->task, mgsl_bh_handler);
4339		info->max_frame_size = 4096;
4340 info->close_delay = 5*HZ/10;
4341 info->closing_wait = 30*HZ;
4342 init_waitqueue_head(&info->open_wait);
4343 init_waitqueue_head(&info->close_wait);
4344 init_waitqueue_head(&info->status_event_wait_q);
4345 init_waitqueue_head(&info->event_wait_q);
4346 spin_lock_init(&info->irq_spinlock);
4347 spin_lock_init(&info->netlock);
4348 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4349 info->idle_mode = HDLC_TXIDLE_FLAGS;
4350 info->num_tx_dma_buffers = 1;
4351 info->num_tx_holding_buffers = 0;
4352 }
4353
4354 return info;
4355
4356} /* end of mgsl_allocate_device()*/
4357
4358static const struct tty_operations mgsl_ops = {
4359	.open = mgsl_open,
4360 .close = mgsl_close,
4361 .write = mgsl_write,
4362 .put_char = mgsl_put_char,
4363 .flush_chars = mgsl_flush_chars,
4364 .write_room = mgsl_write_room,
4365 .chars_in_buffer = mgsl_chars_in_buffer,
4366 .flush_buffer = mgsl_flush_buffer,
4367 .ioctl = mgsl_ioctl,
4368 .throttle = mgsl_throttle,
4369 .unthrottle = mgsl_unthrottle,
4370 .send_xchar = mgsl_send_xchar,
4371 .break_ctl = mgsl_break,
4372 .wait_until_sent = mgsl_wait_until_sent,
4373 .read_proc = mgsl_read_proc,
4374 .set_termios = mgsl_set_termios,
4375 .stop = mgsl_stop,
4376 .start = mgsl_start,
4377 .hangup = mgsl_hangup,
4378 .tiocmget = tiocmget,
4379 .tiocmset = tiocmset,
4380};
4381
4382/*
4383 * perform tty device initialization
4384 */
4385static int mgsl_init_tty(void)
4386{
4387 int rc;
4388
4389 serial_driver = alloc_tty_driver(128);
4390 if (!serial_driver)
4391 return -ENOMEM;
4392
4393 serial_driver->owner = THIS_MODULE;
4394 serial_driver->driver_name = "synclink";
4395 serial_driver->name = "ttySL";
4396 serial_driver->major = ttymajor;
4397 serial_driver->minor_start = 64;
4398 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4399 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4400 serial_driver->init_termios = tty_std_termios;
4401 serial_driver->init_termios.c_cflag =
4402 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4403	serial_driver->init_termios.c_ispeed = 9600;
4404 serial_driver->init_termios.c_ospeed = 9600;
4405	serial_driver->flags = TTY_DRIVER_REAL_RAW;
4406 tty_set_operations(serial_driver, &mgsl_ops);
4407 if ((rc = tty_register_driver(serial_driver)) < 0) {
4408 printk("%s(%d):Couldn't register serial driver\n",
4409 __FILE__,__LINE__);
4410 put_tty_driver(serial_driver);
4411 serial_driver = NULL;
4412 return rc;
4413 }
4414
4415 printk("%s %s, tty major#%d\n",
4416 driver_name, driver_version,
4417 serial_driver->major);
4418 return 0;
4419}
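/*
 * Usage note (illustrative): the registration above exposes up to 128
 * ports named ttySLn starting at minor 64 of the selected major. On a
 * system using static device nodes the first port could be created
 * with something like "mknod /dev/ttySL0 c <major> 64", where <major>
 * comes from the ttymajor setting or dynamic assignment.
 */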
4420
4421/* enumerate user specified ISA adapters
4422 */
4423static void mgsl_enum_isa_devices(void)
4424{
4425 struct mgsl_struct *info;
4426 int i;
4427
4428 /* Check for user specified ISA devices */
4429
4430 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4431 if ( debug_level >= DEBUG_LEVEL_INFO )
4432 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4433 io[i], irq[i], dma[i] );
4434
4435 info = mgsl_allocate_device();
4436 if ( !info ) {
4437 /* error allocating device instance data */
4438 if ( debug_level >= DEBUG_LEVEL_ERROR )
4439 printk( "can't allocate device instance data.\n");
4440 continue;
4441 }
4442
4443 /* Copy user configuration info to device instance data */
4444 info->io_base = (unsigned int)io[i];
4445 info->irq_level = (unsigned int)irq[i];
4446 info->irq_level = irq_canonicalize(info->irq_level);
4447 info->dma_level = (unsigned int)dma[i];
4448 info->bus_type = MGSL_BUS_TYPE_ISA;
4449 info->io_addr_size = 16;
4450 info->irq_flags = 0;
4451
4452 mgsl_add_device( info );
4453 }
4454}
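/*
 * Usage sketch (illustrative, assuming io, irq and dma are exposed as
 * array module parameters as the arrays above suggest): an ISA card at
 * I/O 0x300 using IRQ 10 and DMA channel 5 would be described with
 * something like "io=0x300 irq=10 dma=5" when loading the module.
 * Up to MAX_ISA_DEVICES entries may be given; enumeration stops at the
 * first entry with a zero io or irq value.
 */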
4455
4456static void synclink_cleanup(void)
4457{
4458 int rc;
4459 struct mgsl_struct *info;
4460 struct mgsl_struct *tmp;
4461
4462 printk("Unloading %s: %s\n", driver_name, driver_version);
4463
4464 if (serial_driver) {
4465 if ((rc = tty_unregister_driver(serial_driver)))
4466 printk("%s(%d) failed to unregister tty driver err=%d\n",
4467 __FILE__,__LINE__,rc);
4468 put_tty_driver(serial_driver);
4469 }
4470
4471 info = mgsl_device_list;
4472 while(info) {
4473#if SYNCLINK_GENERIC_HDLC
4474		hdlcdev_exit(info);
4475#endif
4476 mgsl_release_resources(info);
4477 tmp = info;
4478 info = info->next_device;
4479 kfree(tmp);
4480 }
4481
4482	if (pci_registered)
4483 pci_unregister_driver(&synclink_pci_driver);
4484}
4485
4486static int __init synclink_init(void)
4487{
4488 int rc;
4489
4490 if (break_on_load) {
4491 mgsl_get_text_ptr();
4492 BREAKPOINT();
4493 }
4494
4495 printk("%s %s\n", driver_name, driver_version);
4496
4497 mgsl_enum_isa_devices();
4498 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4499 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4500 else
4501 pci_registered = 1;
4502
4503 if ((rc = mgsl_init_tty()) < 0)
4504 goto error;
4505
4506 return 0;
4507
4508error:
4509 synclink_cleanup();
4510 return rc;
4511}
4512
4513static void __exit synclink_exit(void)
4514{
4515 synclink_cleanup();
4516}
4517
4518module_init(synclink_init);
4519module_exit(synclink_exit);
4520
4521/*
4522 * usc_RTCmd()
4523 *
4524 * Issue a USC Receive/Transmit command to the
4525 * Channel Command/Address Register (CCAR).
4526 *
4527 * Notes:
4528 *
4529 * The command is encoded in the most significant 5 bits <15..11>
4530 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4531 * and Bits <6..0> must be written as zeros.
4532 *
4533 * Arguments:
4534 *
4535 * info pointer to device information structure
4536 * Cmd command mask (use symbolic macros)
4537 *
4538 * Return Value:
4539 *
4540 * None
4541 */
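/*
 * Illustrative example of the encoding described above: a command mask
 * of 0x5000 places command code 0x0A (0x5000 >> 11) in bits <15..11>;
 * adding info->loopback_bits below is what keeps bits <10..7> intact.
 */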
4542static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4543{
4544 /* output command to CCAR in bits <15..11> */
4545 /* preserve bits <10..7>, bits <6..0> must be zero */
4546
4547 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4548
4549 /* Read to flush write to CCAR */
4550 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4551 inw( info->io_base + CCAR );
4552
4553} /* end of usc_RTCmd() */
4554
4555/*
4556 * usc_DmaCmd()
4557 *
4558 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4559 *
4560 * Arguments:
4561 *
4562 * info pointer to device information structure
4563 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4564 *
4565 * Return Value:
4566 *
4567 * None
4568 */
4569static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4570{
4571 /* write command mask to DCAR */
4572 outw( Cmd + info->mbre_bit, info->io_base );
4573
4574 /* Read to flush write to DCAR */
4575 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4576 inw( info->io_base );
4577
4578} /* end of usc_DmaCmd() */
4579
4580/*
4581 * usc_OutDmaReg()
4582 *
4583 * Write a 16-bit value to a USC DMA register
4584 *
4585 * Arguments:
4586 *
4587 * info pointer to device info structure
4588 * RegAddr register address (number) for write
4589 * RegValue 16-bit value to write to register
4590 *
4591 * Return Value:
4592 *
4593 * None
4594 *
4595 */
4596static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4597{
4598 /* Note: The DCAR is located at the adapter base address */
4599 /* Note: must preserve state of BIT8 in DCAR */
4600
4601 outw( RegAddr + info->mbre_bit, info->io_base );
4602 outw( RegValue, info->io_base );
4603
4604 /* Read to flush write to DCAR */
4605 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4606 inw( info->io_base );
4607
4608} /* end of usc_OutDmaReg() */
4609
4610/*
4611 * usc_InDmaReg()
4612 *
4613 * Read a 16-bit value from a DMA register
4614 *
4615 * Arguments:
4616 *
4617 * info pointer to device info structure
4618 * RegAddr register address (number) to read from
4619 *
4620 * Return Value:
4621 *
4622 * The 16-bit value read from register
4623 *
4624 */
4625static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4626{
4627 /* Note: The DCAR is located at the adapter base address */
4628 /* Note: must preserve state of BIT8 in DCAR */
4629
4630 outw( RegAddr + info->mbre_bit, info->io_base );
4631 return inw( info->io_base );
4632
4633} /* end of usc_InDmaReg() */
4634
4635/*
4636 *
4637 * usc_OutReg()
4638 *
4639 * Write a 16-bit value to a USC serial channel register
4640 *
4641 * Arguments:
4642 *
4643 * info pointer to device info structure
4644 * RegAddr register address (number) to write to
4645 * RegValue 16-bit value to write to register
4646 *
4647 * Return Value:
4648 *
4649 * None
4650 *
4651 */
4652static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4653{
4654 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4655 outw( RegValue, info->io_base + CCAR );
4656
4657 /* Read to flush write to CCAR */
4658 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4659 inw( info->io_base + CCAR );
4660
4661} /* end of usc_OutReg() */
4662
4663/*
4664 * usc_InReg()
4665 *
4666 * Reads a 16-bit value from a USC serial channel register
4667 *
4668 * Arguments:
4669 *
4670 * info pointer to device extension
4671 * RegAddr register address (number) to read from
4672 *
4673 * Return Value:
4674 *
4675 * 16-bit value read from register
4676 */
4677static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4678{
4679 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4680 return inw( info->io_base + CCAR );
4681
4682} /* end of usc_InReg() */
4683
4684/* usc_set_sdlc_mode()
4685 *
4686 * Set up the adapter for SDLC DMA communications.
4687 *
4688 * Arguments: info pointer to device instance data
4689 * Return Value: NONE
4690 */
4691static void usc_set_sdlc_mode( struct mgsl_struct *info )
4692{
4693 u16 RegValue;
4694 int PreSL1660;
4695
4696 /*
4697 * determine if the IUSC on the adapter is pre-SL1660. If
4698 * not, take advantage of the UnderWait feature of more
4699 * modern chips. If an underrun occurs and this bit is set,
4700 * the transmitter will idle the programmed idle pattern
4701 * until the driver has time to service the underrun. Otherwise,
4702 * the dma controller may get the cycles previously requested
4703 * and begin transmitting queued tx data.
4704 */
4705 usc_OutReg(info,TMCR,0x1f);
4706 RegValue=usc_InReg(info,TMDR);
4707 if ( RegValue == IUSC_PRE_SL1660 )
4708 PreSL1660 = 1;
4709 else
4710 PreSL1660 = 0;
4711
4712
4713 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4714 {
4715 /*
4716 ** Channel Mode Register (CMR)
4717 **
4718 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4719 ** <13> 0 0 = Transmit Disabled (initially)
4720 ** <12> 0 1 = Consecutive Idles share common 0
4721 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4722 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4723 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4724 **
4725 ** 1000 1110 0000 0110 = 0x8e06
4726 */
4727 RegValue = 0x8e06;
4728
4729 /*--------------------------------------------------
4730 * ignore user options for UnderRun Actions and
4731 * preambles
4732 *--------------------------------------------------*/
4733 }
4734 else
4735 {
4736 /* Channel mode Register (CMR)
4737 *
4738 * <15..14> 00 Tx Sub modes, Underrun Action
4739 * <13> 0 1 = Send Preamble before opening flag
4740 * <12> 0 1 = Consecutive Idles share common 0
4741 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4742 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4743 * <3..0> 0110 Receiver mode = HDLC/SDLC
4744 *
4745 * 0000 0110 0000 0110 = 0x0606
4746 */
4747 if (info->params.mode == MGSL_MODE_RAW) {
4748 RegValue = 0x0001; /* Set Receive mode = external sync */
4749
4750 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4751 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4752
4753 /*
4754 * TxSubMode:
4755 * CMR <15> 0 Don't send CRC on Tx Underrun
4756 * CMR <14> x undefined
4757		 * CMR <13>  0  Send preamble before opening sync
4758 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4759 *
4760 * TxMode:
4761		 * CMR <11..8> 0100 MonoSync
4762		 *
4763		 * 0x00 0100 xxxx xxxx = 0x04xx
4764 */
4765 RegValue |= 0x0400;
4766 }
4767 else {
4768
4769 RegValue = 0x0606;
4770
4771 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4772 RegValue |= BIT14;
4773 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4774 RegValue |= BIT15;
4775 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4776 RegValue |= BIT15 + BIT14;
4777 }
4778
4779 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4780 RegValue |= BIT13;
4781 }
4782
4783 if ( info->params.mode == MGSL_MODE_HDLC &&
4784 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4785 RegValue |= BIT12;
4786
4787 if ( info->params.addr_filter != 0xff )
4788 {
4789 /* set up receive address filtering */
4790 usc_OutReg( info, RSR, info->params.addr_filter );
4791 RegValue |= BIT4;
4792 }
4793
4794 usc_OutReg( info, CMR, RegValue );
4795 info->cmr_value = RegValue;
4796
4797 /* Receiver mode Register (RMR)
4798 *
4799 * <15..13> 000 encoding
4800 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4801 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4802 * <9> 0 1 = Include Receive chars in CRC
4803 * <8> 1 1 = Use Abort/PE bit as abort indicator
4804 * <7..6> 00 Even parity
4805 * <5> 0 parity disabled
4806 * <4..2> 000 Receive Char Length = 8 bits
4807 * <1..0> 00 Disable Receiver
4808 *
4809 * 0000 0101 0000 0000 = 0x0500
4810 */
4811
4812 RegValue = 0x0500;
4813
4814 switch ( info->params.encoding ) {
4815 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4816 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4817 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4818 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4819 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4820 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4821 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4822 }
4823
4824 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4825 RegValue |= BIT9;
4826 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4827 RegValue |= ( BIT12 | BIT10 | BIT9 );
4828
4829 usc_OutReg( info, RMR, RegValue );
4830
4831 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4832 /* When an opening flag of an SDLC frame is recognized the */
4833 /* Receive Character count (RCC) is loaded with the value in */
4834 /* RCLR. The RCC is decremented for each received byte. The */
4835 /* value of RCC is stored after the closing flag of the frame */
4836 /* allowing the frame size to be computed. */
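	/* Illustrative example: with RCLR set to 0xffff, an RCC value of
	 * 0xffef stored at the closing flag means 0xffff - 0xffef = 16
	 * characters passed through the receiver for that frame (how CRC
	 * bytes are accounted for is handled where the frame is unloaded
	 * elsewhere in the driver).
	 */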
4837
4838 usc_OutReg( info, RCLR, RCLRVALUE );
4839
4840 usc_RCmd( info, RCmd_SelectRicrdma_level );
4841
4842 /* Receive Interrupt Control Register (RICR)
4843 *
4844 * <15..8> ? RxFIFO DMA Request Level
4845 * <7> 0 Exited Hunt IA (Interrupt Arm)
4846 * <6> 0 Idle Received IA
4847 * <5> 0 Break/Abort IA
4848 * <4> 0 Rx Bound IA
4849 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4850 * <2> 0 Abort/PE IA
4851 * <1> 1 Rx Overrun IA
4852 * <0> 0 Select TC0 value for readback
4853 *
4854	 * 0000 0000 0000 1010 = 0x000a
4855 */
4856
4857 /* Carry over the Exit Hunt and Idle Received bits */
4858 /* in case they have been armed by usc_ArmEvents. */
4859
4860 RegValue = usc_InReg( info, RICR ) & 0xc0;
4861
4862 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4863 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4864 else
4865 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4866
4867 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4868
4869 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4870 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4871
4872 /* Transmit mode Register (TMR)
4873 *
4874 * <15..13> 000 encoding
4875 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4876 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4877 * <9> 0 1 = Tx CRC Enabled
4878 * <8> 0 1 = Append CRC to end of transmit frame
4879 * <7..6> 00 Transmit parity Even
4880 * <5> 0 Transmit parity Disabled
4881 * <4..2> 000 Tx Char Length = 8 bits
4882 * <1..0> 00 Disable Transmitter
4883 *
4884 * 0000 0100 0000 0000 = 0x0400
4885 */
4886
4887 RegValue = 0x0400;
4888
4889 switch ( info->params.encoding ) {
4890 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4891 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4892 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4893 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4894 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4895 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4896 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4897 }
4898
4899 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4900 RegValue |= BIT9 + BIT8;
4901 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4902 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4903
4904 usc_OutReg( info, TMR, RegValue );
4905
4906 usc_set_txidle( info );
4907
4908
4909 usc_TCmd( info, TCmd_SelectTicrdma_level );
4910
4911 /* Transmit Interrupt Control Register (TICR)
4912 *
4913 * <15..8> ? Transmit FIFO DMA Level
4914 * <7> 0 Present IA (Interrupt Arm)
4915 * <6> 0 Idle Sent IA
4916 * <5> 1 Abort Sent IA
4917 * <4> 1 EOF/EOM Sent IA
4918 * <3> 0 CRC Sent IA
4919 * <2> 1 1 = Wait for SW Trigger to Start Frame
4920 * <1> 1 Tx Underrun IA
4921 * <0> 0 TC0 constant on read back
4922 *
4923 * 0000 0000 0011 0110 = 0x0036
4924 */
4925
4926 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4927 usc_OutReg( info, TICR, 0x0736 );
4928 else
4929 usc_OutReg( info, TICR, 0x1436 );
4930
4931 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4932 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4933
4934 /*
4935 ** Transmit Command/Status Register (TCSR)
4936 **
4937 ** <15..12> 0000 TCmd
4938 ** <11> 0/1 UnderWait
4939 ** <10..08> 000 TxIdle
4940 ** <7> x PreSent
4941 ** <6> x IdleSent
4942 ** <5> x AbortSent
4943 ** <4> x EOF/EOM Sent
4944 ** <3> x CRC Sent
4945 ** <2> x All Sent
4946 ** <1> x TxUnder
4947 ** <0> x TxEmpty
4948 **
4949 ** 0000 0000 0000 0000 = 0x0000
4950 */
4951 info->tcsr_value = 0;
4952
4953 if ( !PreSL1660 )
4954 info->tcsr_value |= TCSR_UNDERWAIT;
4955
4956 usc_OutReg( info, TCSR, info->tcsr_value );
4957
4958 /* Clock mode Control Register (CMCR)
4959 *
4960 * <15..14> 00 counter 1 Source = Disabled
4961 * <13..12> 00 counter 0 Source = Disabled
4962 * <11..10> 11 BRG1 Input is TxC Pin
4963 * <9..8> 11 BRG0 Input is TxC Pin
4964 * <7..6> 01 DPLL Input is BRG1 Output
4965 * <5..3> XXX TxCLK comes from Port 0
4966 * <2..0> XXX RxCLK comes from Port 1
4967 *
4968 * 0000 1111 0111 0111 = 0x0f77
4969 */
4970
4971 RegValue = 0x0f40;
4972
4973 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4974 RegValue |= 0x0003; /* RxCLK from DPLL */
4975 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4976 RegValue |= 0x0004; /* RxCLK from BRG0 */
4977 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4978 RegValue |= 0x0006; /* RxCLK from TXC Input */
4979 else
4980 RegValue |= 0x0007; /* RxCLK from Port1 */
4981
4982 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4983 RegValue |= 0x0018; /* TxCLK from DPLL */
4984 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4985 RegValue |= 0x0020; /* TxCLK from BRG0 */
4986 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4987		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4988 else
4989 RegValue |= 0x0030; /* TxCLK from Port0 */
4990
4991 usc_OutReg( info, CMCR, RegValue );
4992
4993
4994 /* Hardware Configuration Register (HCR)
4995 *
4996 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4997 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
4998 * <12> 0 CVOK:0=report code violation in biphase
4999 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5000 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5001 * <7..6> 00 reserved
5002 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5003 * <4> X BRG1 Enable
5004 * <3..2> 00 reserved
5005 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5006 * <0> 0 BRG0 Enable
5007 */
5008
5009 RegValue = 0x0000;
5010
5011 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5012 u32 XtalSpeed;
5013 u32 DpllDivisor;
5014 u16 Tc;
5015
5016 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5017 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5018
5019 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5020 XtalSpeed = 11059200;
5021 else
5022 XtalSpeed = 14745600;
5023
5024 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5025 DpllDivisor = 16;
5026 RegValue |= BIT10;
5027 }
5028 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5029 DpllDivisor = 8;
5030 RegValue |= BIT11;
5031 }
5032 else
5033 DpllDivisor = 32;
5034
5035 /* Tc = (Xtal/Speed) - 1 */
5036 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5037 /* then rounding up gives a more precise time constant. Instead */
5038 /* of rounding up and then subtracting 1 we just don't subtract */
5039 /* the one in this case. */
5040
5041 /*--------------------------------------------------
5042 * ejz: for DPLL mode, application should use the
5043 * same clock speed as the partner system, even
5044 * though clocking is derived from the input RxData.
5045 * In case the user uses a 0 for the clock speed,
5046 * default to 0xffffffff and don't try to divide by
5047 * zero
5048 *--------------------------------------------------*/
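	/* Worked example (illustrative): on an ISA adapter XtalSpeed is
	 * 14745600; with the default DpllDivisor of 32 and a clock_speed
	 * of 9600, 14745600/32/9600 = 48 with no remainder, so twice the
	 * remainder (0) is less than the clock speed and the constant is
	 * decremented to Tc = 48 - 1 = 47.
	 */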
5049 if ( info->params.clock_speed )
5050 {
5051 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5052 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5053 / info->params.clock_speed) )
5054 Tc--;
5055 }
5056 else
5057 Tc = -1;
5058
5059
5060 /* Write 16-bit Time Constant for BRG1 */
5061 usc_OutReg( info, TC1R, Tc );
5062
5063 RegValue |= BIT4; /* enable BRG1 */
5064
5065 switch ( info->params.encoding ) {
5066 case HDLC_ENCODING_NRZ:
5067 case HDLC_ENCODING_NRZB:
5068 case HDLC_ENCODING_NRZI_MARK:
5069 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5070 case HDLC_ENCODING_BIPHASE_MARK:
5071 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5072 case HDLC_ENCODING_BIPHASE_LEVEL:
5073 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5074 }
5075 }
5076
5077 usc_OutReg( info, HCR, RegValue );
5078
5079
5080 /* Channel Control/status Register (CCSR)
5081 *
5082 * <15> X RCC FIFO Overflow status (RO)
5083 * <14> X RCC FIFO Not Empty status (RO)
5084 * <13> 0 1 = Clear RCC FIFO (WO)
5085 * <12> X DPLL Sync (RW)
5086 * <11> X DPLL 2 Missed Clocks status (RO)
5087 * <10> X DPLL 1 Missed Clock status (RO)
5088 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5089 * <7> X SDLC Loop On status (RO)
5090 * <6> X SDLC Loop Send status (RO)
5091 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5092 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5093 * <1..0> 00 reserved
5094 *
5095 * 0000 0000 0010 0000 = 0x0020
5096 */
5097
5098 usc_OutReg( info, CCSR, 0x1020 );
5099
5100
5101 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5102 usc_OutReg( info, SICR,
5103 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5104 }
5105
5106
5107 /* enable Master Interrupt Enable bit (MIE) */
5108 usc_EnableMasterIrqBit( info );
5109
5110 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5111 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5112
5113 /* arm RCC underflow interrupt */
5114 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5115 usc_EnableInterrupts(info, MISC);
5116
5117 info->mbre_bit = 0;
5118 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5119 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5120 info->mbre_bit = BIT8;
5121 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5122
5123 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5124 /* Enable DMAEN (Port 7, Bit 14) */
5125 /* This connects the DMA request signal to the ISA bus */
5126 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5127 }
5128
5129 /* DMA Control Register (DCR)
5130 *
5131 * <15..14> 10 Priority mode = Alternating Tx/Rx
5132 * 01 Rx has priority
5133 * 00 Tx has priority
5134 *
5135 * <13> 1 Enable Priority Preempt per DCR<15..14>
5136 * (WARNING DCR<11..10> must be 00 when this is 1)
5137 * 0 Choose activate channel per DCR<11..10>
5138 *
5139 * <12> 0 Little Endian for Array/List
5140 * <11..10> 00 Both Channels can use each bus grant
5141 * <9..6> 0000 reserved
5142 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5143 * <4> 0 1 = drive D/C and S/D pins
5144 * <3> 1 1 = Add one wait state to all DMA cycles.
5145 * <2> 0 1 = Strobe /UAS on every transfer.
5146 * <1..0> 11 Addr incrementing only affects LS24 bits
5147 *
5148 * 0110 0000 0000 1011 = 0x600b
5149 */
5150
5151 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5152 /* PCI adapter does not need DMA wait state */
5153 usc_OutDmaReg( info, DCR, 0xa00b );
5154 }
5155 else
5156 usc_OutDmaReg( info, DCR, 0x800b );
5157
5158
5159 /* Receive DMA mode Register (RDMR)
5160 *
5161 * <15..14> 11 DMA mode = Linked List Buffer mode
5162	 * <13>       1  RSBinA/L = store Rx status Block in Array/List entry
5163 * <12> 1 Clear count of List Entry after fetching
5164 * <11..10> 00 Address mode = Increment
5165 * <9> 1 Terminate Buffer on RxBound
5166 * <8> 0 Bus Width = 16bits
5167 * <7..0> ? status Bits (write as 0s)
5168 *
5169 * 1111 0010 0000 0000 = 0xf200
5170 */
5171
5172 usc_OutDmaReg( info, RDMR, 0xf200 );
5173
5174
5175 /* Transmit DMA mode Register (TDMR)
5176 *
5177 * <15..14> 11 DMA mode = Linked List Buffer mode
5178 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5179 * <12> 1 Clear count of List Entry after fetching
5180 * <11..10> 00 Address mode = Increment
5181 * <9> 1 Terminate Buffer on end of frame
5182 * <8> 0 Bus Width = 16bits
5183 * <7..0> ? status Bits (Read Only so write as 0)
5184 *
5185 * 1111 0010 0000 0000 = 0xf200
5186 */
5187
5188 usc_OutDmaReg( info, TDMR, 0xf200 );
5189
5190
5191 /* DMA Interrupt Control Register (DICR)
5192 *
5193 * <15> 1 DMA Interrupt Enable
5194 * <14> 0 1 = Disable IEO from USC
5195 * <13> 0 1 = Don't provide vector during IntAck
5196 * <12> 1 1 = Include status in Vector
5197 * <10..2> 0 reserved, Must be 0s
5198 * <1> 0 1 = Rx DMA Interrupt Enabled
5199 * <0> 0 1 = Tx DMA Interrupt Enabled
5200 *
5201 * 1001 0000 0000 0000 = 0x9000
5202 */
5203
5204 usc_OutDmaReg( info, DICR, 0x9000 );
5205
5206 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5207 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5208 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5209
5210 /* Channel Control Register (CCR)
5211 *
5212 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5213 * <13> 0 Trigger Tx on SW Command Disabled
5214 * <12> 0 Flag Preamble Disabled
5215 * <11..10> 00 Preamble Length
5216 * <9..8> 00 Preamble Pattern
5217 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5218 * <5> 0 Trigger Rx on SW Command Disabled
5219 * <4..0> 0 reserved
5220 *
5221 * 1000 0000 1000 0000 = 0x8080
5222 */
5223
5224 RegValue = 0x8080;
5225
5226 switch ( info->params.preamble_length ) {
5227 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5228 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5229 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5230 }
5231
5232 switch ( info->params.preamble ) {
5233 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5234 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5235 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5236 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5237 }
5238
5239 usc_OutReg( info, CCR, RegValue );
5240
5241
5242 /*
5243 * Burst/Dwell Control Register
5244 *
5245 * <15..8> 0x20 Maximum number of transfers per bus grant
5246 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5247 */
5248
5249 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5250 /* don't limit bus occupancy on PCI adapter */
5251 usc_OutDmaReg( info, BDCR, 0x0000 );
5252 }
5253 else
5254 usc_OutDmaReg( info, BDCR, 0x2000 );
5255
5256 usc_stop_transmitter(info);
5257 usc_stop_receiver(info);
5258
5259} /* end of usc_set_sdlc_mode() */
5260
5261/* usc_enable_loopback()
5262 *
5263 * Set the 16C32 for internal loopback mode.
5264 * The TxCLK and RxCLK signals are generated from the BRG0 and
5265 * the TxD is looped back to the RxD internally.
5266 *
5267 * Arguments: info pointer to device instance data
5268 * enable 1 = enable loopback, 0 = disable
5269 * Return Value: None
5270 */
5271static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5272{
5273 if (enable) {
5274 /* blank external TXD output */
5275 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5276
5277 /* Clock mode Control Register (CMCR)
5278 *
5279 * <15..14> 00 counter 1 Disabled
5280 * <13..12> 00 counter 0 Disabled
5281 * <11..10> 11 BRG1 Input is TxC Pin
5282 * <9..8> 11 BRG0 Input is TxC Pin
5283 * <7..6> 01 DPLL Input is BRG1 Output
5284 * <5..3> 100 TxCLK comes from BRG0
5285 * <2..0> 100 RxCLK comes from BRG0
5286 *
5287 * 0000 1111 0110 0100 = 0x0f64
5288 */
5289
5290 usc_OutReg( info, CMCR, 0x0f64 );
5291
5292 /* Write 16-bit Time Constant for BRG0 */
5293 /* use clock speed if available, otherwise use 8 for diagnostics */
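	/* e.g. (illustrative) a 1 Mbps clock on a PCI adapter gives a time */
	/* constant of (11059200/1000000) - 1 = 10 for BRG0.                */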
5294 if (info->params.clock_speed) {
5295 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5296 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5297 else
5298 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5299 } else
5300 usc_OutReg(info, TC0R, (u16)8);
5301
5302 	/* Hardware Configuration Register (HCR): Clear Bit 1 (BRG0
5303 	   mode = Continuous), Set Bit 0 to enable BRG0. */
5304 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5305
5306 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5307 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5308
5309 /* set Internal Data loopback mode */
5310 info->loopback_bits = 0x300;
5311 outw( 0x0300, info->io_base + CCAR );
5312 } else {
5313 /* enable external TXD output */
5314 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5315
5316 /* clear Internal Data loopback mode */
5317 info->loopback_bits = 0;
5318 outw( 0,info->io_base + CCAR );
5319 }
5320
5321} /* end of usc_enable_loopback() */
5322
5323/* usc_enable_aux_clock()
5324 *
5325 * Enable the AUX clock output at the specified frequency.
5326 *
5327 * Arguments:
5328 *
5329 * info pointer to device extension
5330 * data_rate data rate of clock in bits per second
5331 * A data rate of 0 disables the AUX clock.
5332 *
5333 * Return Value: None
5334 */
5335static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5336{
5337 u32 XtalSpeed;
5338 u16 Tc;
5339
5340 if ( data_rate ) {
5341 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5342 XtalSpeed = 11059200;
5343 else
5344 XtalSpeed = 14745600;
5345
5346
5347 /* Tc = (Xtal/Speed) - 1 */
5348 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5349 /* then rounding up gives a more precise time constant. Instead */
5350 /* of rounding up and then subtracting 1 we just don't subtract */
5351 /* the one in this case. */
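	/* Worked example (illustrative): at 9600 bps on an ISA adapter,     */
	/* 14745600/9600 = 1536 with no remainder, so Tc is decremented to   */
	/* 1535; with a large remainder Tc would be left at the truncated    */
	/* quotient, which is the "round up, then don't subtract 1" case.    */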
5352
5353
5354 Tc = (u16)(XtalSpeed/data_rate);
5355 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5356 Tc--;
5357
5358 /* Write 16-bit Time Constant for BRG0 */
5359 usc_OutReg( info, TC0R, Tc );
5360
5361 /*
5362 * Hardware Configuration Register (HCR)
5363 * Clear Bit 1, BRG0 mode = Continuous
5364 * Set Bit 0 to enable BRG0.
5365 */
5366
5367 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5368
5369 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5370 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5371 } else {
5372 /* data rate == 0 so turn off BRG0 */
5373 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5374 }
5375
5376} /* end of usc_enable_aux_clock() */
5377
5378/*
5379 *
5380 * usc_process_rxoverrun_sync()
5381 *
5382 * This function processes a receive overrun by resetting the
5383 * receive DMA buffers and issuing a Purge Rx FIFO command
5384 * to allow the receiver to continue receiving.
5385 *
5386 * Arguments:
5387 *
5388 * info pointer to device extension
5389 *
5390 * Return Value: None
5391 */
5392static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5393{
5394 int start_index;
5395 int end_index;
5396 int frame_start_index;
5397 int start_of_frame_found = FALSE;
5398 int end_of_frame_found = FALSE;
5399 int reprogram_dma = FALSE;
5400
5401 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5402 u32 phys_addr;
5403
5404 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5405 usc_RCmd( info, RCmd_EnterHuntmode );
5406 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5407
5408 /* CurrentRxBuffer points to the 1st buffer of the next */
5409 /* possibly available receive frame. */
5410
5411 frame_start_index = start_index = end_index = info->current_rx_buffer;
5412
5413 /* Search for an unfinished string of buffers. This means */
5414 /* that a receive frame started (at least one buffer with */
5415 /* count set to zero) but there is no terminating buffer */
5416 /* (status set to non-zero). */
5417
5418 while( !buffer_list[end_index].count )
5419 {
5420 /* Count field has been reset to zero by 16C32. */
5421 /* This buffer is currently in use. */
5422
5423 if ( !start_of_frame_found )
5424 {
5425 start_of_frame_found = TRUE;
5426 frame_start_index = end_index;
5427 end_of_frame_found = FALSE;
5428 }
5429
5430 if ( buffer_list[end_index].status )
5431 {
5432 /* Status field has been set by 16C32. */
5433 /* This is the last buffer of a received frame. */
5434
5435 /* We want to leave the buffers for this frame intact. */
5436 /* Move on to next possible frame. */
5437
5438 start_of_frame_found = FALSE;
5439 end_of_frame_found = TRUE;
5440 }
5441
5442 /* advance to next buffer entry in linked list */
5443 end_index++;
5444 if ( end_index == info->rx_buffer_count )
5445 end_index = 0;
5446
5447 if ( start_index == end_index )
5448 {
5449 /* The entire list has been searched with all Counts == 0 and */
5450 /* all Status == 0. The receive buffers are */
5451 /* completely screwed, reset all receive buffers! */
5452 mgsl_reset_rx_dma_buffers( info );
5453 frame_start_index = 0;
5454 start_of_frame_found = FALSE;
5455 reprogram_dma = TRUE;
5456 break;
5457 }
5458 }
5459
5460 if ( start_of_frame_found && !end_of_frame_found )
5461 {
5462 /* There is an unfinished string of receive DMA buffers */
5463 /* as a result of the receiver overrun. */
5464
5465 /* Reset the buffers for the unfinished frame */
5466 /* and reprogram the receive DMA controller to start */
5467 /* at the 1st buffer of unfinished frame. */
5468
5469 start_index = frame_start_index;
5470
5471 do
5472 {
5473 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5474
5475 /* Adjust index for wrap around. */
5476 if ( start_index == info->rx_buffer_count )
5477 start_index = 0;
5478
5479 } while( start_index != end_index );
5480
5481 reprogram_dma = TRUE;
5482 }
5483
5484 if ( reprogram_dma )
5485 {
5486 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5487 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5488 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5489
5490 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5491
5492 /* This empties the receive FIFO and loads the RCC with RCLR */
5493 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5494
5495 /* program 16C32 with physical address of 1st DMA buffer entry */
5496 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5497 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5498 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5499
5500 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5501 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5502 usc_EnableInterrupts( info, RECEIVE_STATUS );
5503
5504 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5505 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5506
5507 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5508 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5509 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5510 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5511 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5512 else
5513 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5514 }
5515 else
5516 {
5517 /* This empties the receive FIFO and loads the RCC with RCLR */
5518 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5519 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5520 }
5521
5522} /* end of usc_process_rxoverrun_sync() */
5523
5524/* usc_stop_receiver()
5525 *
5526 * Disable USC receiver
5527 *
5528 * Arguments: info pointer to device instance data
5529 * Return Value: None
5530 */
5531static void usc_stop_receiver( struct mgsl_struct *info )
5532{
5533 if (debug_level >= DEBUG_LEVEL_ISR)
5534 printk("%s(%d):usc_stop_receiver(%s)\n",
5535 __FILE__,__LINE__, info->device_name );
5536
5537 /* Disable receive DMA channel. */
5538 /* This also disables receive DMA channel interrupts */
5539 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5540
5541 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5542 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5543 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5544
5545 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5546
5547 /* This empties the receive FIFO and loads the RCC with RCLR */
5548 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5549 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5550
5551 info->rx_enabled = 0;
5552 info->rx_overflow = 0;
5553 info->rx_rcc_underrun = 0;
5554
5555} /* end of usc_stop_receiver() */
5556
5557/* usc_start_receiver()
5558 *
5559 * Enable the USC receiver
5560 *
5561 * Arguments: info pointer to device instance data
5562 * Return Value: None
5563 */
5564static void usc_start_receiver( struct mgsl_struct *info )
5565{
5566 u32 phys_addr;
5567
5568 if (debug_level >= DEBUG_LEVEL_ISR)
5569 printk("%s(%d):usc_start_receiver(%s)\n",
5570 __FILE__,__LINE__, info->device_name );
5571
5572 mgsl_reset_rx_dma_buffers( info );
5573 usc_stop_receiver( info );
5574
5575 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5576 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5577
5578 if ( info->params.mode == MGSL_MODE_HDLC ||
5579 info->params.mode == MGSL_MODE_RAW ) {
5580 /* DMA mode Transfers */
5581 /* Program the DMA controller. */
5582 /* Enable the DMA controller end of buffer interrupt. */
5583
5584 /* program 16C32 with physical address of 1st DMA buffer entry */
5585 phys_addr = info->rx_buffer_list[0].phys_entry;
5586 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5587 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5588
5589 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5590 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5591 usc_EnableInterrupts( info, RECEIVE_STATUS );
5592
5593 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5594 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5595
5596 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5597 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5598 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5599 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5600 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5601 else
5602 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5603 } else {
5604 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5605 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5606 usc_EnableInterrupts(info, RECEIVE_DATA);
5607
5608 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5609 usc_RCmd( info, RCmd_EnterHuntmode );
5610
5611 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5612 }
5613
5614 usc_OutReg( info, CCSR, 0x1020 );
5615
5616 info->rx_enabled = 1;
5617
5618} /* end of usc_start_receiver() */
5619
5620/* usc_start_transmitter()
5621 *
5622 * Enable the USC transmitter and send a transmit frame if
5623 * one is loaded in the DMA buffers.
5624 *
5625 * Arguments: info pointer to device instance data
5626 * Return Value: None
5627 */
5628static void usc_start_transmitter( struct mgsl_struct *info )
5629{
5630 u32 phys_addr;
5631 unsigned int FrameSize;
5632
5633 if (debug_level >= DEBUG_LEVEL_ISR)
5634 printk("%s(%d):usc_start_transmitter(%s)\n",
5635 __FILE__,__LINE__, info->device_name );
5636
5637 if ( info->xmit_cnt ) {
5638
5639 /* If auto RTS enabled and RTS is inactive, then assert */
5640 /* RTS and set a flag indicating that the driver should */
5641 /* negate RTS when the transmission completes. */
5642
5643 info->drop_rts_on_tx_done = 0;
5644
5645 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5646 usc_get_serial_signals( info );
5647 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5648 info->serial_signals |= SerialSignal_RTS;
5649 usc_set_serial_signals( info );
5650 info->drop_rts_on_tx_done = 1;
5651 }
5652 }
5653
5654
5655 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5656 if ( !info->tx_active ) {
5657 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5658 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5659 usc_EnableInterrupts(info, TRANSMIT_DATA);
5660 usc_load_txfifo(info);
5661 }
5662 } else {
5663 /* Disable transmit DMA controller while programming. */
5664 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5665
5666 /* Transmit DMA buffer is loaded, so program USC */
5667 /* to send the frame contained in the buffers. */
5668
5669 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5670
5671 /* if operating in Raw sync mode, reset the rcc component
5672 * of the tx dma buffer entry, otherwise, the serial controller
5673 * will send a closing sync char after this count.
5674 */
5675 if ( info->params.mode == MGSL_MODE_RAW )
5676 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5677
5678 /* Program the Transmit Character Length Register (TCLR) */
5679 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5680 usc_OutReg( info, TCLR, (u16)FrameSize );
5681
5682 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5683
5684 /* Program the address of the 1st DMA Buffer Entry in linked list */
5685 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5686 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5687 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5688
5689 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5690 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5691 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5692
5693 if ( info->params.mode == MGSL_MODE_RAW &&
5694 info->num_tx_dma_buffers > 1 ) {
5695 /* When running external sync mode, attempt to 'stream' transmit */
5696 /* by filling tx dma buffers as they become available. To do this */
5697 /* we need to enable Tx DMA EOB Status interrupts : */
5698 /* */
5699 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5700 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5701
5702 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5703 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5704 }
5705
5706 /* Initialize Transmit DMA Channel */
5707 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5708
5709 usc_TCmd( info, TCmd_SendFrame );
5710
5711 		mod_timer(&info->tx_timer, jiffies +
5712 msecs_to_jiffies(5000));
5713 	}
5714 info->tx_active = 1;
5715 }
5716
5717 if ( !info->tx_enabled ) {
5718 info->tx_enabled = 1;
5719 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5720 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5721 else
5722 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5723 }
5724
5725} /* end of usc_start_transmitter() */
5726
5727/* usc_stop_transmitter()
5728 *
5729 * Stops the transmitter and DMA
5730 *
5731 * Arguments: info pointer to device instance data
5732 * Return Value: None
5733 */
5734static void usc_stop_transmitter( struct mgsl_struct *info )
5735{
5736 if (debug_level >= DEBUG_LEVEL_ISR)
5737 printk("%s(%d):usc_stop_transmitter(%s)\n",
5738 __FILE__,__LINE__, info->device_name );
5739
5740 del_timer(&info->tx_timer);
5741
5742 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5743 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5744 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5745
5746 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5747 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5748 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5749
5750 info->tx_enabled = 0;
5751 info->tx_active = 0;
5752
5753} /* end of usc_stop_transmitter() */
5754
5755/* usc_load_txfifo()
5756 *
5757 * Fill the transmit FIFO until the FIFO is full or
5758 * there is no more data to load.
5759 *
5760 * Arguments: info pointer to device extension (instance data)
5761 * Return Value: None
5762 */
5763static void usc_load_txfifo( struct mgsl_struct *info )
5764{
5765 int Fifocount;
5766 u8 TwoBytes[2];
5767
5768 if ( !info->xmit_cnt && !info->x_char )
5769 return;
5770
5771 /* Select transmit FIFO status readback in TICR */
5772 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5773
5774 /* load the Transmit FIFO until FIFOs full or all data sent */
5775
5776 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5777 /* there is more space in the transmit FIFO and */
5778 /* there is more data in transmit buffer */
5779
5780 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5781 /* write a 16-bit word from transmit buffer to 16C32 */
5782
5783 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5784 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5785 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5786 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5787
5788 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5789
5790 info->xmit_cnt -= 2;
5791 info->icount.tx += 2;
5792 } else {
5793 /* only 1 byte left to transmit or 1 FIFO slot left */
5794
5795 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5796 info->io_base + CCAR );
5797
5798 if (info->x_char) {
5799 /* transmit pending high priority char */
5800 outw( info->x_char,info->io_base + CCAR );
5801 info->x_char = 0;
5802 } else {
5803 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5804 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5805 info->xmit_cnt--;
5806 }
5807 info->icount.tx++;
5808 }
5809 }
5810
5811} /* end of usc_load_txfifo() */
5812
5813/* usc_reset()
5814 *
5815 * Reset the adapter to a known state and prepare it for further use.
5816 *
5817 * Arguments: info pointer to device instance data
5818 * Return Value: None
5819 */
5820static void usc_reset( struct mgsl_struct *info )
5821{
5822 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5823 int i;
5824 u32 readval;
5825
5826 /* Set BIT30 of Misc Control Register */
5827 /* (Local Control Register 0x50) to force reset of USC. */
5828
5829 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5830 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5831
5832 info->misc_ctrl_value |= BIT30;
5833 *MiscCtrl = info->misc_ctrl_value;
5834
5835 /*
5836 * Force at least 170ns delay before clearing
5837 * reset bit. Each read from LCR takes at least
5838 * 30ns so 10 times for 300ns to be safe.
5839 */
5840 for(i=0;i<10;i++)
5841 readval = *MiscCtrl;
5842
5843 info->misc_ctrl_value &= ~BIT30;
5844 *MiscCtrl = info->misc_ctrl_value;
5845
5846 *LCR0BRDR = BUS_DESCRIPTOR(
5847 1, // Write Strobe Hold (0-3)
5848 2, // Write Strobe Delay (0-3)
5849 2, // Read Strobe Delay (0-3)
5850 0, // NWDD (Write data-data) (0-3)
5851 4, // NWAD (Write Addr-data) (0-31)
5852 0, // NXDA (Read/Write Data-Addr) (0-3)
5853 0, // NRDD (Read Data-Data) (0-3)
5854 5 // NRAD (Read Addr-Data) (0-31)
5855 );
5856 } else {
5857 /* do HW reset */
5858 outb( 0,info->io_base + 8 );
5859 }
5860
5861 info->mbre_bit = 0;
5862 info->loopback_bits = 0;
5863 info->usc_idle_mode = 0;
5864
5865 /*
5866 * Program the Bus Configuration Register (BCR)
5867 *
5868 * <15> 0 Don't use separate address
5869 * <14..6> 0 reserved
5870 * <5..4> 00 IAckmode = Default, don't care
5871 * <3> 1 Bus Request Totem Pole output
5872 * <2> 1 Use 16 Bit data bus
5873 * <1> 0 IRQ Totem Pole output
5874 * <0> 0 Don't Shift Right Addr
5875 *
5876 * 0000 0000 0000 1100 = 0x000c
5877 *
5878 * By writing to io_base + SDPIN the Wait/Ack pin is
5879 * programmed to work as a Wait pin.
5880 */
5881
5882 outw( 0x000c,info->io_base + SDPIN );
5883
5884
5885 outw( 0,info->io_base );
5886 outw( 0,info->io_base + CCAR );
5887
5888 /* select little endian byte ordering */
5889 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5890
5891
5892 /* Port Control Register (PCR)
5893 *
5894 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5895 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5896 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5897 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5898 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5899 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5900 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5901 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5902 *
5903 * 1111 0000 1111 0101 = 0xf0f5
5904 */
5905
5906 usc_OutReg( info, PCR, 0xf0f5 );
5907
5908
5909 /*
5910 * Input/Output Control Register
5911 *
5912 * <15..14> 00 CTS is active low input
5913 * <13..12> 00 DCD is active low input
5914 * <11..10> 00 TxREQ pin is input (DSR)
5915 * <9..8> 00 RxREQ pin is input (RI)
5916 * <7..6> 00 TxD is output (Transmit Data)
5917 * <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
5918 * <2..0> 100 RxC is Output (drive with BRG0)
5919 *
5920 * 0000 0000 0000 0100 = 0x0004
5921 */
5922
5923 usc_OutReg( info, IOCR, 0x0004 );
5924
5925} /* end of usc_reset() */
5926
5927/* usc_set_async_mode()
5928 *
5929 * Program adapter for asynchronous communications.
5930 *
5931 * Arguments: info pointer to device instance data
5932 * Return Value: None
5933 */
5934static void usc_set_async_mode( struct mgsl_struct *info )
5935{
5936 u16 RegValue;
5937
5938 /* disable interrupts while programming USC */
5939 usc_DisableMasterIrqBit( info );
5940
5941 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5942 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5943
5944 usc_loopback_frame( info );
5945
5946 /* Channel mode Register (CMR)
5947 *
5948 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5949 * <13..12> 00 00 = 16X Clock
5950 * <11..8> 0000 Transmitter mode = Asynchronous
5951 * <7..6> 00 reserved?
5952 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5953 * <3..0> 0000 Receiver mode = Asynchronous
5954 *
5955 * 0000 0000 0000 0000 = 0x0
5956 */
5957
5958 RegValue = 0;
5959 if ( info->params.stop_bits != 1 )
5960 RegValue |= BIT14;
5961 usc_OutReg( info, CMR, RegValue );
5962
5963
5964 /* Receiver mode Register (RMR)
5965 *
5966 * <15..13> 000 encoding = None
5967 * <12..08> 00000 reserved (Sync Only)
5968 * <7..6> 00 Even parity
5969 * <5> 0 parity disabled
5970 * <4..2> 000 Receive Char Length = 8 bits
5971 * <1..0> 00 Disable Receiver
5972 *
5973 * 0000 0000 0000 0000 = 0x0
5974 */
5975
5976 RegValue = 0;
5977
5978 if ( info->params.data_bits != 8 )
5979 RegValue |= BIT4+BIT3+BIT2;
5980
5981 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5982 RegValue |= BIT5;
5983 if ( info->params.parity != ASYNC_PARITY_ODD )
5984 RegValue |= BIT6;
5985 }
5986
5987 usc_OutReg( info, RMR, RegValue );
5988
5989
5990 /* Set IRQ trigger level */
5991
5992 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5993
5994
5995 /* Receive Interrupt Control Register (RICR)
5996 *
5997 * <15..8> ? RxFIFO IRQ Request Level
5998 *
5999 * Note: For async mode the receive FIFO level must be set
6000 * to 0 to avoid the situation where the FIFO contains fewer bytes
6001 * than the trigger level and no more data is expected.
6002 *
6003 * <7> 0 Exited Hunt IA (Interrupt Arm)
6004 * <6> 0 Idle Received IA
6005 * <5> 0 Break/Abort IA
6006 * <4> 0 Rx Bound IA
6007 * <3> 0 Queued status reflects oldest byte in FIFO
6008 * <2> 0 Abort/PE IA
6009 * <1> 0 Rx Overrun IA
6010 * <0> 0 Select TC0 value for readback
6011 *
6012 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6013 */
6014
6015 usc_OutReg( info, RICR, 0x0000 );
6016
6017 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6018 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6019
6020
6021 /* Transmit mode Register (TMR)
6022 *
6023 * <15..13> 000 encoding = None
6024 * <12..08> 00000 reserved (Sync Only)
6025 * <7..6> 00 Transmit parity Even
6026 * <5> 0 Transmit parity Disabled
6027 * <4..2> 000 Tx Char Length = 8 bits
6028 * <1..0> 00 Disable Transmitter
6029 *
6030 * 0000 0000 0000 0000 = 0x0
6031 */
6032
6033 RegValue = 0;
6034
6035 if ( info->params.data_bits != 8 )
6036 RegValue |= BIT4+BIT3+BIT2;
6037
6038 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6039 RegValue |= BIT5;
6040 if ( info->params.parity != ASYNC_PARITY_ODD )
6041 RegValue |= BIT6;
6042 }
6043
6044 usc_OutReg( info, TMR, RegValue );
6045
6046 usc_set_txidle( info );
6047
6048
6049 /* Set IRQ trigger level */
6050
6051 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6052
6053
6054 /* Transmit Interrupt Control Register (TICR)
6055 *
6056 * <15..8> ? Transmit FIFO IRQ Level
6057 * <7> 0 Present IA (Interrupt Arm)
6058 * <6> 1 Idle Sent IA
6059 * <5> 0 Abort Sent IA
6060 * <4> 0 EOF/EOM Sent IA
6061 * <3> 0 CRC Sent IA
6062 * <2> 0 1 = Wait for SW Trigger to Start Frame
6063 * <1> 0 Tx Underrun IA
6064 * <0> 0 TC0 constant on read back
6065 *
6066 * 0000 0000 0100 0000 = 0x0040
6067 */
6068
6069 usc_OutReg( info, TICR, 0x1f40 );
6070
6071 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6072 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6073
6074 usc_enable_async_clock( info, info->params.data_rate );
6075
6076
6077 /* Channel Control/status Register (CCSR)
6078 *
6079 * <15> X RCC FIFO Overflow status (RO)
6080 * <14> X RCC FIFO Not Empty status (RO)
6081 * <13> 0 1 = Clear RCC FIFO (WO)
6082 * <12> X DPLL in Sync status (RO)
6083 * <11> X DPLL 2 Missed Clocks status (RO)
6084 * <10> X DPLL 1 Missed Clock status (RO)
6085 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6086 * <7> X SDLC Loop On status (RO)
6087 * <6> X SDLC Loop Send status (RO)
6088 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6089 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6090 * <1..0> 00 reserved
6091 *
6092 * 0000 0000 0010 0000 = 0x0020
6093 */
6094
6095 usc_OutReg( info, CCSR, 0x0020 );
6096
6097 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6098 RECEIVE_DATA + RECEIVE_STATUS );
6099
6100 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6101 RECEIVE_DATA + RECEIVE_STATUS );
6102
6103 usc_EnableMasterIrqBit( info );
6104
6105 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6106 /* Enable INTEN (Port 6, Bit12) */
6107 /* This connects the IRQ request signal to the ISA bus */
6108 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6109 }
6110
6111 	if (info->params.loopback) {
6112 info->loopback_bits = 0x300;
6113 outw(0x0300, info->io_base + CCAR);
6114 }
6115
6116} /* end of usc_set_async_mode() */
6117
6118/* usc_loopback_frame()
6119 *
6120 * Loop back a small (2 byte) dummy SDLC frame.
6121 * Interrupts and DMA are NOT used. The purpose of this is to
6122 * clear any 'stale' status info left over from running in async mode.
6123 *
6124 * The 16C32 shows the strange behaviour of marking the 1st
6125 * received SDLC frame with a CRC error even when there is no
6126 * CRC error. To get around this a small dummy frame of 2 bytes
6127 * is looped back when switching from async to sync mode.
6128 *
6129 * Arguments: info pointer to device instance data
6130 * Return Value: None
6131 */
6132static void usc_loopback_frame( struct mgsl_struct *info )
6133{
6134 int i;
6135 unsigned long oldmode = info->params.mode;
6136
6137 info->params.mode = MGSL_MODE_HDLC;
6138
6139 usc_DisableMasterIrqBit( info );
6140
6141 usc_set_sdlc_mode( info );
6142 usc_enable_loopback( info, 1 );
6143
6144 /* Write 16-bit Time Constant for BRG0 */
6145 usc_OutReg( info, TC0R, 0 );
6146
6147 /* Channel Control Register (CCR)
6148 *
6149 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6150 * <13> 0 Trigger Tx on SW Command Disabled
6151 * <12> 0 Flag Preamble Disabled
6152 * <11..10> 00 Preamble Length = 8-Bits
6153 * <9..8> 01 Preamble Pattern = flags
6154 * <7..6> 00 Don't use 32-bit Rx status Blocks (RSBs)
6155 * <5> 0 Trigger Rx on SW Command Disabled
6156 * <4..0> 0 reserved
6157 *
6158 * 0000 0001 0000 0000 = 0x0100
6159 */
6160
6161 usc_OutReg( info, CCR, 0x0100 );
6162
6163 /* SETUP RECEIVER */
6164 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6165 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6166
6167 /* SETUP TRANSMITTER */
6168 /* Program the Transmit Character Length Register (TCLR) */
6169 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6170 usc_OutReg( info, TCLR, 2 );
6171 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6172
6173 /* unlatch Tx status bits, and start transmit channel. */
6174 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6175 outw(0,info->io_base + DATAREG);
6176
6177 /* ENABLE TRANSMITTER */
6178 usc_TCmd( info, TCmd_SendFrame );
6179 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6180
6181 /* WAIT FOR RECEIVE COMPLETE */
6182 for (i=0 ; i<1000 ; i++)
6183 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6184 break;
6185
6186 /* clear Internal Data loopback mode */
6187 usc_enable_loopback(info, 0);
6188
6189 usc_EnableMasterIrqBit(info);
6190
6191 info->params.mode = oldmode;
6192
6193} /* end of usc_loopback_frame() */
6194
6195/* usc_set_sync_mode() Programs the USC for SDLC communications.
6196 *
6197 * Arguments: info pointer to adapter info structure
6198 * Return Value: None
6199 */
6200static void usc_set_sync_mode( struct mgsl_struct *info )
6201{
6202 usc_loopback_frame( info );
6203 usc_set_sdlc_mode( info );
6204
6205 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6206 /* Enable INTEN (Port 6, Bit12) */
6207 /* This connects the IRQ request signal to the ISA bus */
6208 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6209 }
6210
6211 usc_enable_aux_clock(info, info->params.clock_speed);
6212
6213 if (info->params.loopback)
6214 usc_enable_loopback(info,1);
6215
6216} /* end of usc_set_sync_mode() */
6217
6218/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6219 *
6220 * Arguments: info pointer to device instance data
6221 * Return Value: None
6222 */
6223static void usc_set_txidle( struct mgsl_struct *info )
6224{
6225 u16 usc_idle_mode = IDLEMODE_FLAGS;
6226
6227 /* Map API idle mode to USC register bits */
6228
6229 switch( info->idle_mode ){
6230 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6231 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6232 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6233 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6234 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6235 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6236 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6237 }
6238
6239 info->usc_idle_mode = usc_idle_mode;
6240 //usc_OutReg(info, TCSR, usc_idle_mode);
6241 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6242 info->tcsr_value += usc_idle_mode;
6243 usc_OutReg(info, TCSR, info->tcsr_value);
6244
6245 /*
6246 * if SyncLink WAN adapter is running in external sync mode, the
6247 * transmitter has been set to Monosync in order to try to mimic
6248 * a true raw outbound bit stream. Monosync still sends an open/close
6249 * sync char at the start/end of a frame. Try to match those sync
6250 * patterns to the idle mode set here
6251 */
6252 if ( info->params.mode == MGSL_MODE_RAW ) {
6253 unsigned char syncpat = 0;
6254 switch( info->idle_mode ) {
6255 case HDLC_TXIDLE_FLAGS:
6256 syncpat = 0x7e;
6257 break;
6258 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6259 syncpat = 0x55;
6260 break;
6261 case HDLC_TXIDLE_ZEROS:
6262 case HDLC_TXIDLE_SPACE:
6263 syncpat = 0x00;
6264 break;
6265 case HDLC_TXIDLE_ONES:
6266 case HDLC_TXIDLE_MARK:
6267 syncpat = 0xff;
6268 break;
6269 case HDLC_TXIDLE_ALT_MARK_SPACE:
6270 syncpat = 0xaa;
6271 break;
6272 }
6273
6274 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6275 }
6276
6277} /* end of usc_set_txidle() */
6278
6279/* usc_get_serial_signals()
6280 *
6281 * Query the adapter for the state of the V24 status (input) signals.
6282 *
6283 * Arguments: info pointer to device instance data
6284 * Return Value: None
6285 */
6286static void usc_get_serial_signals( struct mgsl_struct *info )
6287{
6288 u16 status;
6289
6290 /* clear all serial signals except DTR and RTS */
6291 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6292
6293 /* Read the Misc Interrupt status Register (MISR) to get */
6294 /* the V24 status signals. */
6295
6296 status = usc_InReg( info, MISR );
6297
6298 /* set serial signal bits to reflect MISR */
6299
6300 if ( status & MISCSTATUS_CTS )
6301 info->serial_signals |= SerialSignal_CTS;
6302
6303 if ( status & MISCSTATUS_DCD )
6304 info->serial_signals |= SerialSignal_DCD;
6305
6306 if ( status & MISCSTATUS_RI )
6307 info->serial_signals |= SerialSignal_RI;
6308
6309 if ( status & MISCSTATUS_DSR )
6310 info->serial_signals |= SerialSignal_DSR;
6311
6312} /* end of usc_get_serial_signals() */
6313
6314/* usc_set_serial_signals()
6315 *
6316 * Set the state of DTR and RTS based on contents of
6317 * serial_signals member of device extension.
6318 *
6319 * Arguments: info pointer to device instance data
6320 * Return Value: None
6321 */
6322static void usc_set_serial_signals( struct mgsl_struct *info )
6323{
6324 u16 Control;
6325 unsigned char V24Out = info->serial_signals;
6326
6327 /* get the current value of the Port Control Register (PCR) */
6328
6329 Control = usc_InReg( info, PCR );
6330
6331 if ( V24Out & SerialSignal_RTS )
6332 Control &= ~(BIT6);
6333 else
6334 Control |= BIT6;
6335
6336 if ( V24Out & SerialSignal_DTR )
6337 Control &= ~(BIT4);
6338 else
6339 Control |= BIT4;
6340
6341 usc_OutReg( info, PCR, Control );
6342
6343} /* end of usc_set_serial_signals() */
6344
6345/* usc_enable_async_clock()
6346 *
6347 * Enable the async clock at the specified frequency.
6348 *
6349 * Arguments: info pointer to device instance data
6350 * data_rate data rate of clock in bps
6351 * 0 disables the AUX clock.
6352 * Return Value: None
6353 */
6354static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6355{
6356 if ( data_rate ) {
6357 /*
6358 * Clock mode Control Register (CMCR)
6359 *
6360 * <15..14> 00 counter 1 Disabled
6361 * <13..12> 00 counter 0 Disabled
6362 * <11..10> 11 BRG1 Input is TxC Pin
6363 * <9..8> 11 BRG0 Input is TxC Pin
6364 * <7..6> 01 DPLL Input is BRG1 Output
6365 * <5..3> 100 TxCLK comes from BRG0
6366 * <2..0> 100 RxCLK comes from BRG0
6367 *
6368 * 0000 1111 0110 0100 = 0x0f64
6369 */
6370
6371 usc_OutReg( info, CMCR, 0x0f64 );
6372
6373
6374 /*
6375 * Write 16-bit Time Constant for BRG0
6376 * Time Constant = (ClkSpeed / data_rate) - 1
6377 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6378 */
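	/* e.g. (illustrative) at 9600 bps: ISA (921600/9600) - 1 = 95 and    */
	/* PCI (691200/9600) - 1 = 71. These base rates are the 14.7456 MHz   */
	/* and 11.0592 MHz crystals divided by 16 for the 16X async clocking. */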
6379
6380 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6381 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6382 else
6383 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6384
6385
6386 /*
6387 * Hardware Configuration Register (HCR)
6388 * Clear Bit 1, BRG0 mode = Continuous
6389 * Set Bit 0 to enable BRG0.
6390 */
6391
6392 usc_OutReg( info, HCR,
6393 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6394
6395
6396 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6397
6398 usc_OutReg( info, IOCR,
6399 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6400 } else {
6401 /* data rate == 0 so turn off BRG0 */
6402 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6403 }
6404
6405} /* end of usc_enable_async_clock() */
6406
6407/*
6408 * Buffer Structures:
6409 *
6410 * Normal memory access uses virtual addresses that can make discontiguous
6411 * physical memory pages appear to be contiguous in the virtual address
6412 * space (the processor's memory mapping handles the conversions).
6413 *
6414 * DMA transfers require physically contiguous memory. This is because
6415 * the DMA system controller and DMA bus masters deal with memory using
6416 * only physical addresses.
6417 *
6418 * This causes a problem under Windows NT when large DMA buffers are
6419 * needed. Fragmentation of the nonpaged pool prevents allocations of
6420 * physically contiguous buffers larger than the PAGE_SIZE.
6421 *
6422 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6423 * allows DMA transfers to physically discontiguous buffers. Information
6424 * about each data transfer buffer is contained in a memory structure
6425 * called a 'buffer entry'. A list of buffer entries is maintained
6426 * to track and control the use of the data transfer buffers.
6427 *
6428 * To support this strategy we will allocate sufficient PAGE_SIZE
6429 * contiguous memory buffers to allow for the total required buffer
6430 * space.
6431 *
6432 * The 16C32 accesses the list of buffer entries using Bus Master
6433 * DMA. Control information is read from the buffer entries by the
6434 * 16C32 to control data transfers. status information is written to
6435 * the buffer entries by the 16C32 to indicate the status of completed
6436 * transfers.
6437 *
6438 * The CPU writes control information to the buffer entries to control
6439 * the 16C32 and reads status information from the buffer entries to
6440 * determine information about received and transmitted frames.
6441 *
6442 * Because the CPU and 16C32 (adapter) both need simultaneous access
6443 * to the buffer entries, the buffer entry memory is allocated with
6444 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6445 * entry list to PAGE_SIZE.
6446 *
6447 * The actual data buffers on the other hand will only be accessed
6448 * by the CPU or the adapter but not by both simultaneously. This allows
6449 * Scatter/Gather packet based DMA procedures for using physically
6450 * discontiguous pages.
6451 */
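/*
 * Summary sketch (illustrative only; the real DMABUFFERENTRY layout is
 * defined elsewhere in this driver): the code below relies on each entry
 * providing at least a count (bytes free/used), a status (written by the
 * 16C32 when a frame completes), an rcc (character count for the frame),
 * a virt_addr (CPU view of the data buffer) and a phys_entry (physical
 * address of the entry itself, programmed into NRARL/NRARU or NTARL/NTARU
 * to start a DMA channel).
 */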
6452
6453/*
6454 * mgsl_reset_tx_dma_buffers()
6455 *
6456 * Set the count for all transmit buffers to 0 to indicate the
6457 * buffer is available for use and set the current buffer to the
6458 * first buffer. This effectively makes all buffers free and
6459 * discards any data in buffers.
6460 *
6461 * Arguments: info pointer to device instance data
6462 * Return Value: None
6463 */
6464static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6465{
6466 unsigned int i;
6467
6468 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6469 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6470 }
6471
6472 info->current_tx_buffer = 0;
6473 info->start_tx_dma_buffer = 0;
6474 info->tx_dma_buffers_used = 0;
6475
6476 info->get_tx_holding_index = 0;
6477 info->put_tx_holding_index = 0;
6478 info->tx_holding_count = 0;
6479
6480} /* end of mgsl_reset_tx_dma_buffers() */
6481
6482/*
6483 * num_free_tx_dma_buffers()
6484 *
6485 * returns the number of free tx dma buffers available
6486 *
6487 * Arguments: info pointer to device instance data
6488 * Return Value: number of free tx dma buffers
6489 */
6490static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6491{
6492 return info->tx_buffer_count - info->tx_dma_buffers_used;
6493}
6494
6495/*
6496 * mgsl_reset_rx_dma_buffers()
6497 *
6498 * Set the count for all receive buffers to DMABUFFERSIZE
6499 * and set the current buffer to the first buffer. This effectively
6500 * makes all buffers free and discards any data in buffers.
6501 *
6502 * Arguments: info pointer to device instance data
6503 * Return Value: None
6504 */
6505static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6506{
6507 unsigned int i;
6508
6509 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6510 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6511// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6512// info->rx_buffer_list[i].status = 0;
6513 }
6514
6515 info->current_rx_buffer = 0;
6516
6517} /* end of mgsl_reset_rx_dma_buffers() */
6518
6519/*
6520 * mgsl_free_rx_frame_buffers()
6521 *
6522 * Free the receive buffers used by a received SDLC
6523 * frame such that the buffers can be reused.
6524 *
6525 * Arguments:
6526 *
6527 * info pointer to device instance data
6528 * StartIndex index of 1st receive buffer of frame
6529 * EndIndex index of last receive buffer of frame
6530 *
6531 * Return Value: None
6532 */
6533static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6534{
6535 int Done = 0;
6536 DMABUFFERENTRY *pBufEntry;
6537 unsigned int Index;
6538
6539 /* Starting with 1st buffer entry of the frame clear the status */
6540 /* field and set the count field to DMA Buffer Size. */
6541
6542 Index = StartIndex;
6543
6544 while( !Done ) {
6545 pBufEntry = &(info->rx_buffer_list[Index]);
6546
6547 if ( Index == EndIndex ) {
6548 /* This is the last buffer of the frame! */
6549 Done = 1;
6550 }
6551
6552 /* reset current buffer for reuse */
6553// pBufEntry->status = 0;
6554// pBufEntry->count = DMABUFFERSIZE;
6555 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6556
6557 /* advance to next buffer entry in linked list */
6558 Index++;
6559 if ( Index == info->rx_buffer_count )
6560 Index = 0;
6561 }
6562
6563 /* set current buffer to next buffer after last buffer of frame */
6564 info->current_rx_buffer = Index;
6565
6566} /* end of mgsl_free_rx_frame_buffers() */
6567
6568/* mgsl_get_rx_frame()
6569 *
6570 * This function attempts to return a received SDLC frame from the
6571 * receive DMA buffers. Only frames received without errors are returned.
6572 *
6573 * Arguments: info pointer to device extension
6574 * Return Value: 1 if frame returned, otherwise 0
6575 */
6576static int mgsl_get_rx_frame(struct mgsl_struct *info)
6577{
6578 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6579 unsigned short status;
6580 DMABUFFERENTRY *pBufEntry;
6581 unsigned int framesize = 0;
6582 int ReturnCode = 0;
6583 unsigned long flags;
6584 struct tty_struct *tty = info->tty;
6585 int return_frame = 0;
6586
6587 /*
6588 * current_rx_buffer points to the 1st buffer of the next available
6589 * receive frame. To find the last buffer of the frame look for
6590 * a non-zero status field in the buffer entries. (The status
6591 * field is set by the 16C32 after completing a receive frame.)
6592 */
6593
6594 StartIndex = EndIndex = info->current_rx_buffer;
6595
6596 while( !info->rx_buffer_list[EndIndex].status ) {
6597 /*
6598 * If the count field of the buffer entry is non-zero then
6599 * this buffer has not been used. (The 16C32 clears the count
6600 * field when it starts using the buffer.) If an unused buffer
6601 * is encountered then there are no frames available.
6602 */
6603
6604 if ( info->rx_buffer_list[EndIndex].count )
6605 goto Cleanup;
6606
6607 /* advance to next buffer entry in linked list */
6608 EndIndex++;
6609 if ( EndIndex == info->rx_buffer_count )
6610 EndIndex = 0;
6611
6612 /* if entire list searched then no frame available */
6613 if ( EndIndex == StartIndex ) {
6614 /* If this occurs then something bad happened,
6615 * all buffers have been 'used' but none mark
6616 * the end of a frame. Reset buffers and receiver.
6617 */
6618
6619 if ( info->rx_enabled ){
6620 spin_lock_irqsave(&info->irq_spinlock,flags);
6621 usc_start_receiver(info);
6622 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6623 }
6624 goto Cleanup;
6625 }
6626 }
6627
6628
6629 /* check status of receive frame */
6630
6631 status = info->rx_buffer_list[EndIndex].status;
6632
6633 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6634 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6635 if ( status & RXSTATUS_SHORT_FRAME )
6636 info->icount.rxshort++;
6637 else if ( status & RXSTATUS_ABORT )
6638 info->icount.rxabort++;
6639 else if ( status & RXSTATUS_OVERRUN )
6640 info->icount.rxover++;
6641 else {
6642 info->icount.rxcrc++;
6643 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6644 return_frame = 1;
6645 }
6646 framesize = 0;
6647#if SYNCLINK_GENERIC_HDLC
6648 	{
6649 struct net_device_stats *stats = hdlc_stats(info->netdev);
6650 stats->rx_errors++;
6651 stats->rx_frame_errors++;
6652 }
6653#endif
6654 } else
6655 return_frame = 1;
6656
6657 if ( return_frame ) {
6658 /* receive frame has no errors, get frame size.
6659 * The frame size is the starting value of the RCC (which was
6660 * set to 0xffff) minus the ending value of the RCC (decremented
6661 * once for each receive character) minus 2 for the 16-bit CRC.
6662 */
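	/* Illustrative example, assuming RCLRVALUE is 0xffff as noted above:  */
	/* an ending RCC of 0xfff5 gives 0xffff - 0xfff5 = 10 bytes, and the   */
	/* CRC adjustment below then removes 2 (16-bit) or 4 (32-bit) of them. */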
6663
6664 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6665
6666 /* adjust frame size for CRC if any */
6667 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6668 framesize -= 2;
6669 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6670 framesize -= 4;
6671 }
6672
6673 if ( debug_level >= DEBUG_LEVEL_BH )
6674 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6675 __FILE__,__LINE__,info->device_name,status,framesize);
6676
6677 if ( debug_level >= DEBUG_LEVEL_DATA )
6678 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6679 min_t(int, framesize, DMABUFFERSIZE),0);
6680
6681 if (framesize) {
6682 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6683 ((framesize+1) > info->max_frame_size) ) ||
6684 (framesize > info->max_frame_size) )
6685 info->icount.rxlong++;
6686 else {
6687 /* copy dma buffer(s) to contiguous intermediate buffer */
6688 int copy_count = framesize;
6689 int index = StartIndex;
6690 unsigned char *ptmp = info->intermediate_rxbuffer;
6691
6692 if ( !(status & RXSTATUS_CRC_ERROR))
6693 info->icount.rxok++;
6694
6695 while(copy_count) {
6696 int partial_count;
6697 if ( copy_count > DMABUFFERSIZE )
6698 partial_count = DMABUFFERSIZE;
6699 else
6700 partial_count = copy_count;
6701
6702 pBufEntry = &(info->rx_buffer_list[index]);
6703 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6704 ptmp += partial_count;
6705 copy_count -= partial_count;
6706
6707 if ( ++index == info->rx_buffer_count )
6708 index = 0;
6709 }
6710
6711 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6712 ++framesize;
6713 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6714 RX_CRC_ERROR :
6715 RX_OK);
6716
6717 if ( debug_level >= DEBUG_LEVEL_DATA )
6718 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6719 __FILE__,__LINE__,info->device_name,
6720 *ptmp);
6721 }
6722
6723#if SYNCLINK_GENERIC_HDLC
6724 	if (info->netcount)
6725 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6726 else
6727#endif
6728 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6729 }
6730 }
6731 /* Free the buffers used by this frame. */
6732 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6733
6734 ReturnCode = 1;
6735
6736Cleanup:
6737
6738 if ( info->rx_enabled && info->rx_overflow ) {
6739 	/* The receiver needs to be restarted because of
6740 * a receive overflow (buffer or FIFO). If the
6741 * receive buffers are now empty, then restart receiver.
6742 */
6743
6744 if ( !info->rx_buffer_list[EndIndex].status &&
6745 info->rx_buffer_list[EndIndex].count ) {
6746 spin_lock_irqsave(&info->irq_spinlock,flags);
6747 usc_start_receiver(info);
6748 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6749 }
6750 }
6751
6752 return ReturnCode;
6753
6754} /* end of mgsl_get_rx_frame() */
6755
6756/* mgsl_get_raw_rx_frame()
6757 *
6758 * This function attempts to return a received frame from the
6759 * receive DMA buffers when running in external loop mode. In this mode,
6760 * we will return at most one DMABUFFERSIZE frame to the application.
6761 * The USC receiver is triggering off of DCD going active to start a new
6762 * frame, and DCD going inactive to terminate the frame (similar to
6763 * processing a closing flag character).
6764 *
6765 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6766 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6767 * status field and the RCC field will indicate the length of the
6768 * entire received frame. We take this RCC field and get the modulus
6769 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6770 * last Rx DMA buffer and return that last portion of the frame.
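 *
 * For example (illustrative, assuming the 4K buffer size noted later in
 * this function): a 10000 byte frame leaves 10000 % 4096 = 1808 bytes in
 * the final Rx DMA buffer, and that final portion is what is returned.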
6771 *
6772 * Arguments: info pointer to device extension
6773 * Return Value: 1 if frame returned, otherwise 0
6774 */
6775static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6776{
6777 unsigned int CurrentIndex, NextIndex;
6778 unsigned short status;
6779 DMABUFFERENTRY *pBufEntry;
6780 unsigned int framesize = 0;
6781 int ReturnCode = 0;
6782 unsigned long flags;
6783 struct tty_struct *tty = info->tty;
6784
6785 /*
6786 * current_rx_buffer points to the 1st buffer of the next available
6787 * receive frame. The status field is set by the 16C32 after
6788 * completing a receive frame. If the status field of this buffer
6789 * is zero, either the USC is still filling this buffer or this
6790 * is one of a series of buffers making up a received frame.
6791 *
6792 * If the count field of this buffer is zero, the USC is either
6793 * using this buffer or has used this buffer. Look at the count
6794 * field of the next buffer. If that next buffer's count is
6795 * non-zero, the USC is still actively using the current buffer.
6796 * Otherwise, if the next buffer's count field is zero, the
6797 * current buffer is complete and the USC is using the next
6798 * buffer.
6799 */
6800 CurrentIndex = NextIndex = info->current_rx_buffer;
6801 ++NextIndex;
6802 if ( NextIndex == info->rx_buffer_count )
6803 NextIndex = 0;
6804
6805 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6806 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6807 info->rx_buffer_list[NextIndex].count == 0)) {
6808 /*
6809 * Either the status field of this dma buffer is non-zero
6810 * (indicating the last buffer of a receive frame) or the next
6811 * buffer is marked as in use -- implying this buffer is complete
6812 * and an intermediate buffer for this received frame.
6813 */
6814
6815 status = info->rx_buffer_list[CurrentIndex].status;
6816
6817 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6818 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6819 if ( status & RXSTATUS_SHORT_FRAME )
6820 info->icount.rxshort++;
6821 else if ( status & RXSTATUS_ABORT )
6822 info->icount.rxabort++;
6823 else if ( status & RXSTATUS_OVERRUN )
6824 info->icount.rxover++;
6825 else
6826 info->icount.rxcrc++;
6827 framesize = 0;
6828 } else {
6829 /*
6830 * A receive frame is available, get frame size and status.
6831 *
6832 * The frame size is the starting value of the RCC (which was
6833 * set to 0xffff) minus the ending value of the RCC (decremented
6834 * once for each receive character) minus 2 or 4 for the 16-bit
6835 * or 32-bit CRC.
6836 *
6837 * If the status field is zero, this is an intermediate buffer.
6838 * Its size is 4K.
6839 *
6840 * If the DMA Buffer Entry's Status field is non-zero, the
6841 * receive operation completed normally (ie: DCD dropped). The
6842 * RCC field is valid and holds the received frame size.
6843 * It is possible that the RCC field will be zero on a DMA buffer
6844 * entry with a non-zero status. This can occur if the total
6845 * frame size (number of bytes between the time DCD goes active
6846 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6847 * case the 16C32 has underrun on the RCC count and appears to
6848 * stop updating this counter to let us know the actual received
6849 * frame size. If this happens (non-zero status and zero RCC),
6850 * simply return the entire RxDMA Buffer
6851 */
6852 if ( status ) {
6853 /*
6854 * In the event that the final RxDMA Buffer is
6855 * terminated with a non-zero status and the RCC
6856 * field is zero, we interpret this as the RCC
6857 * having underflowed (received frame > 65535 bytes).
6858 *
6859 * Signal the event to the user by passing back
6860 * a status of RxStatus_CrcError returning the full
6861 * buffer and let the app figure out what data is
6862 * actually valid
6863 */
6864 if ( info->rx_buffer_list[CurrentIndex].rcc )
6865 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6866 else
6867 framesize = DMABUFFERSIZE;
6868 }
6869 else
6870 framesize = DMABUFFERSIZE;
6871 }
6872
6873 if ( framesize > DMABUFFERSIZE ) {
6874 /*
6875 * if running in raw sync mode, ISR handler for
6876 * End Of Buffer events terminates all buffers at 4K.
6877 * If this frame size is said to be >4K, get the
6878 * actual number of bytes of the frame in this buffer.
6879 */
6880 framesize = framesize % DMABUFFERSIZE;
6881 }
6882
6883
6884 if ( debug_level >= DEBUG_LEVEL_BH )
6885 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6886 __FILE__,__LINE__,info->device_name,status,framesize);
6887
6888 if ( debug_level >= DEBUG_LEVEL_DATA )
6889 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6890 min_t(int, framesize, DMABUFFERSIZE),0);
6891
6892 if (framesize) {
6893 /* copy dma buffer(s) to contiguous intermediate buffer */
6894 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6895
6896 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6897 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6898 info->icount.rxok++;
6899
6900 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6901 }
6902
6903 /* Free the buffers used by this frame. */
6904 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6905
6906 ReturnCode = 1;
6907 }
6908
6909
6910 if ( info->rx_enabled && info->rx_overflow ) {
6911 	/* The receiver needs to be restarted because of
6912 * a receive overflow (buffer or FIFO). If the
6913 * receive buffers are now empty, then restart receiver.
6914 */
6915
6916 if ( !info->rx_buffer_list[CurrentIndex].status &&
6917 info->rx_buffer_list[CurrentIndex].count ) {
6918 spin_lock_irqsave(&info->irq_spinlock,flags);
6919 usc_start_receiver(info);
6920 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6921 }
6922 }
6923
6924 return ReturnCode;
6925
6926} /* end of mgsl_get_raw_rx_frame() */
6927
6928/* mgsl_load_tx_dma_buffer()
6929 *
6930 * Load the transmit DMA buffer with the specified data.
6931 *
6932 * Arguments:
6933 *
6934 * info pointer to device extension
6935 * Buffer pointer to buffer containing frame to load
6936 * BufferSize size in bytes of frame in Buffer
6937 *
6938 * Return Value: None
6939 */
6940static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6941 const char *Buffer, unsigned int BufferSize)
6942{
6943 unsigned short Copycount;
6944 unsigned int i = 0;
6945 DMABUFFERENTRY *pBufEntry;
6946
6947 if ( debug_level >= DEBUG_LEVEL_DATA )
6948 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6949
6950 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6951 /* set CMR:13 to start transmit when
6952 * next GoAhead (abort) is received
6953 */
6954 info->cmr_value |= BIT13;
6955 }
6956
6957 /* begin loading the frame in the next available tx dma
6958	 * buffer, remember its starting location for setting
6959 * up tx dma operation
6960 */
6961 i = info->current_tx_buffer;
6962 info->start_tx_dma_buffer = i;
6963
6964 /* Setup the status and RCC (Frame Size) fields of the 1st */
6965 /* buffer entry in the transmit DMA buffer list. */
6966
6967 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6968 info->tx_buffer_list[i].rcc = BufferSize;
6969 info->tx_buffer_list[i].count = BufferSize;
6970
6971 /* Copy frame data from 1st source buffer to the DMA buffers. */
6972 /* The frame data may span multiple DMA buffers. */
6973
6974 while( BufferSize ){
6975 /* Get a pointer to next DMA buffer entry. */
6976 pBufEntry = &info->tx_buffer_list[i++];
6977
6978 if ( i == info->tx_buffer_count )
6979 i=0;
6980
6981 /* Calculate the number of bytes that can be copied from */
6982 /* the source buffer to this DMA buffer. */
6983 if ( BufferSize > DMABUFFERSIZE )
6984 Copycount = DMABUFFERSIZE;
6985 else
6986 Copycount = BufferSize;
6987
6988 /* Actually copy data from source buffer to DMA buffer. */
6989 /* Also set the data count for this individual DMA buffer. */
6990 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6991 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6992 else
6993 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6994
6995 pBufEntry->count = Copycount;
6996
6997 /* Advance source pointer and reduce remaining data count. */
6998 Buffer += Copycount;
6999 BufferSize -= Copycount;
7000
7001 ++info->tx_dma_buffers_used;
7002 }
7003
7004 /* remember next available tx dma buffer */
7005 info->current_tx_buffer = i;
7006
7007} /* end of mgsl_load_tx_dma_buffer() */
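/*
 * Illustrative sketch of the copy loop above (assuming DMABUFFERSIZE is
 * 4096, the 4K buffer size noted earlier): a 10000 byte frame occupies
 * three tx DMA buffer entries with counts of 4096, 4096 and 1808 bytes.
 * Only the first entry's status and rcc fields are set here, with rcc
 * carrying the full frame size (10000).
 */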
7008
7009/*
7010 * mgsl_register_test()
7011 *
7012 * Performs a register test of the 16C32.
7013 *
7014 * Arguments: info pointer to device instance data
7015 * Return Value: TRUE if test passed, otherwise FALSE
7016 */
7017static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7018{
7019 static unsigned short BitPatterns[] =
7020 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7021	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
7022	unsigned int i;
7023 BOOLEAN rc = TRUE;
7024 unsigned long flags;
7025
7026 spin_lock_irqsave(&info->irq_spinlock,flags);
7027 usc_reset(info);
7028
7029 /* Verify the reset state of some registers. */
7030
7031 if ( (usc_InReg( info, SICR ) != 0) ||
7032 (usc_InReg( info, IVR ) != 0) ||
7033 (usc_InDmaReg( info, DIVR ) != 0) ){
7034 rc = FALSE;
7035 }
7036
7037 if ( rc == TRUE ){
7038 /* Write bit patterns to various registers but do it out of */
7039 /* sync, then read back and verify values. */
7040
7041 for ( i = 0 ; i < Patterncount ; i++ ) {
7042 usc_OutReg( info, TC0R, BitPatterns[i] );
7043 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7044 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7045 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7046 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7047 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7048
7049 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7050 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7051 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7052 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7053 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7054 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7055 rc = FALSE;
7056 break;
7057 }
7058 }
7059 }
7060
7061 usc_reset(info);
7062 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7063
7064 return rc;
7065
7066} /* end of mgsl_register_test() */
7067
7068/* mgsl_irq_test() Perform interrupt test of the 16C32.
7069 *
7070 * Arguments: info pointer to device instance data
7071 * Return Value: TRUE if test passed, otherwise FALSE
7072 */
7073static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7074{
7075 unsigned long EndTime;
7076 unsigned long flags;
7077
7078 spin_lock_irqsave(&info->irq_spinlock,flags);
7079 usc_reset(info);
7080
7081 /*
7082	 * Set up the 16C32 to interrupt on a TxC pin (14MHz clock) transition.
7083 * The ISR sets irq_occurred to 1.
7084 */
7085
7086 info->irq_occurred = FALSE;
7087
7088 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7089 /* Enable INTEN (Port 6, Bit12) */
7090 /* This connects the IRQ request signal to the ISA bus */
7091 /* on the ISA adapter. This has no effect for the PCI adapter */
7092 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7093
7094 usc_EnableMasterIrqBit(info);
7095 usc_EnableInterrupts(info, IO_PIN);
7096 usc_ClearIrqPendingBits(info, IO_PIN);
7097
7098 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7099 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7100
7101 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7102
7103 EndTime=100;
7104 while( EndTime-- && !info->irq_occurred ) {
7105 msleep_interruptible(10);
7106 }
7107
7108 spin_lock_irqsave(&info->irq_spinlock,flags);
7109 usc_reset(info);
7110 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7111
7112 if ( !info->irq_occurred )
7113 return FALSE;
7114 else
7115 return TRUE;
7116
7117} /* end of mgsl_irq_test() */
7118
7119/* mgsl_dma_test()
7120 *
7121 * Perform a DMA test of the 16C32. A small frame is
7122 * transmitted via DMA from a transmit buffer to a receive buffer
7123 * using single buffer DMA mode.
7124 *
7125 * Arguments: info pointer to device instance data
7126 * Return Value: TRUE if test passed, otherwise FALSE
7127 */
7128static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7129{
7130 unsigned short FifoLevel;
7131 unsigned long phys_addr;
7132 unsigned int FrameSize;
7133 unsigned int i;
7134 char *TmpPtr;
7135 BOOLEAN rc = TRUE;
7136 unsigned short status=0;
7137 unsigned long EndTime;
7138 unsigned long flags;
7139 MGSL_PARAMS tmp_params;
7140
7141 /* save current port options */
7142 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7143 /* load default port options */
7144 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7145
7146#define TESTFRAMESIZE 40
7147
7148 spin_lock_irqsave(&info->irq_spinlock,flags);
7149
7150 /* setup 16C32 for SDLC DMA transfer mode */
7151
7152 usc_reset(info);
7153 usc_set_sdlc_mode(info);
7154 usc_enable_loopback(info,1);
7155
7156 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7157 * field of the buffer entry after fetching buffer address. This
7158 * way we can detect a DMA failure for a DMA read (which should be
7159	 * non-destructive to system memory) before we try to write to
7160 * memory (where a failure could corrupt system memory).
7161 */
7162
7163 /* Receive DMA mode Register (RDMR)
7164 *
7165 * <15..14> 11 DMA mode = Linked List Buffer mode
7166 * <13> 1 RSBinA/L = store Rx status Block in List entry
7167 * <12> 0 1 = Clear count of List Entry after fetching
7168 * <11..10> 00 Address mode = Increment
7169 * <9> 1 Terminate Buffer on RxBound
7170 * <8> 0 Bus Width = 16bits
7171 * <7..0> ? status Bits (write as 0s)
7172 *
7173 * 1110 0010 0000 0000 = 0xe200
7174 */
7175
7176 usc_OutDmaReg( info, RDMR, 0xe200 );
7177
7178 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7179
7180
7181 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7182
7183 FrameSize = TESTFRAMESIZE;
7184
7185 /* setup 1st transmit buffer entry: */
7186 /* with frame size and transmit control word */
7187
7188 info->tx_buffer_list[0].count = FrameSize;
7189 info->tx_buffer_list[0].rcc = FrameSize;
7190 info->tx_buffer_list[0].status = 0x4000;
7191
7192 /* build a transmit frame in 1st transmit DMA buffer */
7193
7194 TmpPtr = info->tx_buffer_list[0].virt_addr;
7195 for (i = 0; i < FrameSize; i++ )
7196 *TmpPtr++ = i;
7197
7198 /* setup 1st receive buffer entry: */
7199 /* clear status, set max receive buffer size */
7200
7201 info->rx_buffer_list[0].status = 0;
7202 info->rx_buffer_list[0].count = FrameSize + 4;
7203
7204 /* zero out the 1st receive buffer */
7205
7206 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7207
7208 /* Set count field of next buffer entries to prevent */
7209 /* 16C32 from using buffers after the 1st one. */
7210
7211 info->tx_buffer_list[1].count = 0;
7212 info->rx_buffer_list[1].count = 0;
7213
7214
7215 /***************************/
7216 /* Program 16C32 receiver. */
7217 /***************************/
7218
7219 spin_lock_irqsave(&info->irq_spinlock,flags);
7220
7221 /* setup DMA transfers */
7222 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7223
7224 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7225 phys_addr = info->rx_buffer_list[0].phys_entry;
7226 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7227 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7228
7229 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7230 usc_InDmaReg( info, RDMR );
7231 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7232
7233 /* Enable Receiver (RMR <1..0> = 10) */
7234 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7235
7236 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7237
7238
7239 /*************************************************************/
7240 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7241 /*************************************************************/
7242
7243 /* Wait 100ms for interrupt. */
7244 EndTime = jiffies + msecs_to_jiffies(100);
7245
7246 for(;;) {
7247 if (time_after(jiffies, EndTime)) {
7248 rc = FALSE;
7249 break;
7250 }
7251
7252 spin_lock_irqsave(&info->irq_spinlock,flags);
7253 status = usc_InDmaReg( info, RDMR );
7254 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7255
7256 if ( !(status & BIT4) && (status & BIT5) ) {
7257 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7258 /* BUSY (BIT 5) is active (channel still active). */
7259 /* This means the buffer entry read has completed. */
7260 break;
7261 }
7262 }
7263
7264
7265 /******************************/
7266 /* Program 16C32 transmitter. */
7267 /******************************/
7268
7269 spin_lock_irqsave(&info->irq_spinlock,flags);
7270
7271 /* Program the Transmit Character Length Register (TCLR) */
7272 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7273
7274 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7275 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7276
7277 /* Program the address of the 1st DMA Buffer Entry in linked list */
7278
7279 phys_addr = info->tx_buffer_list[0].phys_entry;
7280 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7281 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7282
7283 /* unlatch Tx status bits, and start transmit channel. */
7284
7285 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7286 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7287
7288 /* wait for DMA controller to fill transmit FIFO */
7289
7290 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7291
7292 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7293
7294
7295 /**********************************/
7296 /* WAIT FOR TRANSMIT FIFO TO FILL */
7297 /**********************************/
7298
7299 /* Wait 100ms */
7300 EndTime = jiffies + msecs_to_jiffies(100);
7301
7302 for(;;) {
7303 if (time_after(jiffies, EndTime)) {
7304 rc = FALSE;
7305 break;
7306 }
7307
7308 spin_lock_irqsave(&info->irq_spinlock,flags);
7309 FifoLevel = usc_InReg(info, TICR) >> 8;
7310 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7311
7312 if ( FifoLevel < 16 )
7313 break;
7314 else
7315 if ( FrameSize < 32 ) {
7316 /* This frame is smaller than the entire transmit FIFO */
7317 /* so wait for the entire frame to be loaded. */
7318 if ( FifoLevel <= (32 - FrameSize) )
7319 break;
7320 }
7321 }
7322
7323
7324 if ( rc == TRUE )
7325 {
7326 /* Enable 16C32 transmitter. */
7327
7328 spin_lock_irqsave(&info->irq_spinlock,flags);
7329
7330 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7331 usc_TCmd( info, TCmd_SendFrame );
7332 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7333
7334 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7335
7336
7337 /******************************/
7338 /* WAIT FOR TRANSMIT COMPLETE */
7339 /******************************/
7340
7341 /* Wait 100ms */
7342 EndTime = jiffies + msecs_to_jiffies(100);
7343
7344 /* While timer not expired wait for transmit complete */
7345
7346 spin_lock_irqsave(&info->irq_spinlock,flags);
7347 status = usc_InReg( info, TCSR );
7348 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7349
7350 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7351 if (time_after(jiffies, EndTime)) {
7352 rc = FALSE;
7353 break;
7354 }
7355
7356 spin_lock_irqsave(&info->irq_spinlock,flags);
7357 status = usc_InReg( info, TCSR );
7358 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7359 }
7360 }
7361
7362
7363 if ( rc == TRUE ){
7364 /* CHECK FOR TRANSMIT ERRORS */
7365 if ( status & (BIT5 + BIT1) )
7366 rc = FALSE;
7367 }
7368
7369 if ( rc == TRUE ) {
7370 /* WAIT FOR RECEIVE COMPLETE */
7371
7372 /* Wait 100ms */
7373 EndTime = jiffies + msecs_to_jiffies(100);
7374
7375 /* Wait for 16C32 to write receive status to buffer entry. */
7376 status=info->rx_buffer_list[0].status;
7377 while ( status == 0 ) {
7378 if (time_after(jiffies, EndTime)) {
7379 rc = FALSE;
7380 break;
7381 }
7382 status=info->rx_buffer_list[0].status;
7383 }
7384 }
7385
7386
7387 if ( rc == TRUE ) {
7388 /* CHECK FOR RECEIVE ERRORS */
7389 status = info->rx_buffer_list[0].status;
7390
7391 if ( status & (BIT8 + BIT3 + BIT1) ) {
7392 /* receive error has occurred */
7393 rc = FALSE;
7394 } else {
7395 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7396 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7397 rc = FALSE;
7398 }
7399 }
7400 }
7401
7402 spin_lock_irqsave(&info->irq_spinlock,flags);
7403 usc_reset( info );
7404 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7405
7406 /* restore current port options */
7407 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7408
7409 return rc;
7410
7411} /* end of mgsl_dma_test() */
7412
7413/* mgsl_adapter_test()
7414 *
7415 * Perform the register, IRQ, and DMA tests for the 16C32.
7416 *
7417 * Arguments: info pointer to device instance data
7418 * Return Value: 0 if success, otherwise -ENODEV
7419 */
7420static int mgsl_adapter_test( struct mgsl_struct *info )
7421{
7422 if ( debug_level >= DEBUG_LEVEL_INFO )
7423 printk( "%s(%d):Testing device %s\n",
7424 __FILE__,__LINE__,info->device_name );
7425
7426 if ( !mgsl_register_test( info ) ) {
7427 info->init_error = DiagStatus_AddressFailure;
7428 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7429 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7430 return -ENODEV;
7431 }
7432
7433 if ( !mgsl_irq_test( info ) ) {
7434 info->init_error = DiagStatus_IrqFailure;
7435 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7436 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7437 return -ENODEV;
7438 }
7439
7440 if ( !mgsl_dma_test( info ) ) {
7441 info->init_error = DiagStatus_DmaFailure;
7442 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7443 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7444 return -ENODEV;
7445 }
7446
7447 if ( debug_level >= DEBUG_LEVEL_INFO )
7448 printk( "%s(%d):device %s passed diagnostics\n",
7449 __FILE__,__LINE__,info->device_name );
7450
7451 return 0;
7452
7453} /* end of mgsl_adapter_test() */
7454
7455/* mgsl_memory_test()
7456 *
7457 * Test the shared memory on a PCI adapter.
7458 *
7459 * Arguments: info pointer to device instance data
7460 * Return Value: TRUE if test passed, otherwise FALSE
7461 */
7462static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7463{
7464	static unsigned long BitPatterns[] =
7465 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7466 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7467	unsigned long i;
7468 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7469 unsigned long * TestAddr;
7470
7471 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7472 return TRUE;
7473
7474 TestAddr = (unsigned long *)info->memory_base;
7475
7476 /* Test data lines with test pattern at one location. */
7477
7478 for ( i = 0 ; i < Patterncount ; i++ ) {
7479 *TestAddr = BitPatterns[i];
7480 if ( *TestAddr != BitPatterns[i] )
7481 return FALSE;
7482 }
7483
7484 /* Test address lines with incrementing pattern over */
7485 /* entire address range. */
7486
7487 for ( i = 0 ; i < TestLimit ; i++ ) {
7488 *TestAddr = i * 4;
7489 TestAddr++;
7490 }
7491
7492 TestAddr = (unsigned long *)info->memory_base;
7493
7494 for ( i = 0 ; i < TestLimit ; i++ ) {
7495 if ( *TestAddr != i * 4 )
7496 return FALSE;
7497 TestAddr++;
7498 }
7499
7500 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7501
7502 return TRUE;
7503
7504} /* End Of mgsl_memory_test() */
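/*
 * Note on the address line test above: each 32-bit location is written
 * with a value derived from its own index (i * 4). If two locations
 * alias because of a stuck or shorted address line, the later write
 * clobbers the earlier value and the read-back pass detects the
 * mismatch.
 */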
7505
7506
7507/* mgsl_load_pci_memory()
7508 *
7509 * Load a large block of data into the PCI shared memory.
7510 * Use this instead of memcpy() or memmove() to move data
7511 * into the PCI shared memory.
7512 *
7513 * Notes:
7514 *
7515 * This function prevents the PCI9050 interface chip from hogging
7516 * the adapter local bus, which can starve the 16C32 by preventing
7517 * 16C32 bus master cycles.
7518 *
7519 * The PCI9050 documentation says that the 9050 will always release
7520 * control of the local bus after completing the current read
7521 * or write operation.
7522 *
7523 * It appears that as long as the PCI9050 write FIFO is full, the
7524 * PCI9050 treats all of the writes as a single burst transaction
7525 * and will not release the bus. This causes DMA latency problems
7526 * at high speeds when copying large data blocks to the shared
7527 * memory.
7528 *
7529 * This function, in effect, breaks a large shared memory write
7530 * into multiple transactions by interleaving a shared memory read
7531 * which will flush the write FIFO and 'complete' the write
7532 * transaction. This allows any pending DMA request to gain control
7533 * of the local bus in a timely fashion.
7534 *
7535 * Arguments:
7536 *
7537 * TargetPtr pointer to target address in PCI shared memory
7538 * SourcePtr pointer to source buffer for data
7539 * count count in bytes of data to copy
7540 *
7541 * Return Value: None
7542 */
7543static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7544 unsigned short count )
7545{
7546 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7547#define PCI_LOAD_INTERVAL 64
7548
7549 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7550 unsigned short Index;
7551 unsigned long Dummy;
7552
7553 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7554 {
7555 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7556 Dummy = *((volatile unsigned long *)TargetPtr);
7557 TargetPtr += PCI_LOAD_INTERVAL;
7558 SourcePtr += PCI_LOAD_INTERVAL;
7559 }
7560
7561 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7562
7563} /* End Of mgsl_load_pci_memory() */
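/*
 * Example (illustrative): a 150 byte load is performed as two 64 byte
 * memcpy bursts, each followed by a dummy 32-bit read of the target to
 * flush the PCI9050 write FIFO, plus a trailing memcpy of the
 * remaining 22 bytes (150 % 64).
 */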
7564
7565static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7566{
7567 int i;
7568 int linecount;
7569 if (xmit)
7570 printk("%s tx data:\n",info->device_name);
7571 else
7572 printk("%s rx data:\n",info->device_name);
7573
7574 while(count) {
7575 if (count > 16)
7576 linecount = 16;
7577 else
7578 linecount = count;
7579
7580 for(i=0;i<linecount;i++)
7581 printk("%02X ",(unsigned char)data[i]);
7582 for(;i<17;i++)
7583 printk(" ");
7584 for(i=0;i<linecount;i++) {
7585 if (data[i]>=040 && data[i]<=0176)
7586 printk("%c",data[i]);
7587 else
7588 printk(".");
7589 }
7590 printk("\n");
7591
7592 data += linecount;
7593 count -= linecount;
7594 }
7595} /* end of mgsl_trace_block() */
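/*
 * Illustrative mgsl_trace_block() output for a 5 byte transmit buffer
 * on a device assumed to be named "ttySL0" (column spacing approximate):
 *
 *   ttySL0 tx data:
 *   48 44 4C 43 7E                                      HDLC~
 */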
7596
7597/* mgsl_tx_timeout()
7598 *
7599 * called when HDLC frame times out
7600 * update stats and do tx completion processing
7601 *
7602 * Arguments: context pointer to device instance data
7603 * Return Value: None
7604 */
7605static void mgsl_tx_timeout(unsigned long context)
7606{
7607 struct mgsl_struct *info = (struct mgsl_struct*)context;
7608 unsigned long flags;
7609
7610 if ( debug_level >= DEBUG_LEVEL_INFO )
7611 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7612 __FILE__,__LINE__,info->device_name);
7613 if(info->tx_active &&
7614 (info->params.mode == MGSL_MODE_HDLC ||
7615 info->params.mode == MGSL_MODE_RAW) ) {
7616 info->icount.txtimeout++;
7617 }
7618 spin_lock_irqsave(&info->irq_spinlock,flags);
7619 info->tx_active = 0;
7620 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7621
7622 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7623 usc_loopmode_cancel_transmit( info );
7624
7625 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7626
7627#if SYNCLINK_GENERIC_HDLC
7628	if (info->netcount)
7629 hdlcdev_tx_done(info);
7630 else
7631#endif
7632 mgsl_bh_transmit(info);
7633
7634} /* end of mgsl_tx_timeout() */
7635
7636/* signal that there are no more frames to send, so that
7637 * line is 'released' by echoing RxD to TxD when current
7638 * transmission is complete (or immediately if no tx in progress).
7639 */
7640static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7641{
7642 unsigned long flags;
7643
7644 spin_lock_irqsave(&info->irq_spinlock,flags);
7645 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7646 if (info->tx_active)
7647 info->loopmode_send_done_requested = TRUE;
7648 else
7649 usc_loopmode_send_done(info);
7650 }
7651 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7652
7653 return 0;
7654}
7655
7656/* release the line by echoing RxD to TxD
7657 * upon completion of a transmit frame
7658 */
7659static void usc_loopmode_send_done( struct mgsl_struct * info )
7660{
7661 info->loopmode_send_done_requested = FALSE;
7662 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7663 info->cmr_value &= ~BIT13;
7664 usc_OutReg(info, CMR, info->cmr_value);
7665}
7666
7667/* abort a transmit in progress while in HDLC LoopMode
7668 */
7669static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7670{
7671 /* reset tx dma channel and purge TxFifo */
7672 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7673 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7674 usc_loopmode_send_done( info );
7675}
7676
7677/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7678 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7679 * we must clear CMR:13 to begin repeating TxData to RxData
7680 */
7681static void usc_loopmode_insert_request( struct mgsl_struct * info )
7682{
7683 info->loopmode_insert_requested = TRUE;
7684
7685 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7686 * begin repeating TxData on RxData (complete insertion)
7687 */
7688 usc_OutReg( info, RICR,
7689 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7690
7691 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7692 info->cmr_value |= BIT13;
7693 usc_OutReg(info, CMR, info->cmr_value);
7694}
7695
7696/* return 1 if station is inserted into the loop, otherwise 0
7697 */
7698static int usc_loopmode_active( struct mgsl_struct * info)
7699{
7700 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7701}
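/*
 * Loop mode in brief (from the comments above): CMR:13 set requests
 * insertion/ownership of the loop at the next GoAhead (RxAbort);
 * CMR:13 cleared echoes RxD to TxD, releasing the line. CCSR bit 7
 * reports whether this station is currently inserted into the loop.
 */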
7702
7703#if SYNCLINK_GENERIC_HDLC
7704
7705/**
7706 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7707 * set encoding and frame check sequence (FCS) options
7708 *
7709 * dev pointer to network device structure
7710 * encoding serial encoding setting
7711 * parity FCS setting
7712 *
7713 * returns 0 if success, otherwise error code
7714 */
7715static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7716 unsigned short parity)
7717{
7718 struct mgsl_struct *info = dev_to_port(dev);
7719 unsigned char new_encoding;
7720 unsigned short new_crctype;
7721
7722 /* return error if TTY interface open */
7723 if (info->count)
7724 return -EBUSY;
7725
7726 switch (encoding)
7727 {
7728 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7729 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7730 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7731 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7732 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7733 default: return -EINVAL;
7734 }
7735
7736 switch (parity)
7737 {
7738 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7739 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7740 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7741 default: return -EINVAL;
7742 }
7743
7744 info->params.encoding = new_encoding;
7745	info->params.crc_type = new_crctype;
7746
7747 /* if network interface up, reprogram hardware */
7748 if (info->netcount)
7749 mgsl_program_hw(info);
7750
7751 return 0;
7752}
7753
7754/**
7755 * called by generic HDLC layer to send frame
7756 *
7757 * skb socket buffer containing HDLC frame
7758 * dev pointer to network device structure
7759 *
7760 * returns 0 if success, otherwise error code
7761 */
7762static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7763{
7764 struct mgsl_struct *info = dev_to_port(dev);
7765 struct net_device_stats *stats = hdlc_stats(dev);
7766 unsigned long flags;
7767
7768 if (debug_level >= DEBUG_LEVEL_INFO)
7769 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7770
7771 /* stop sending until this frame completes */
7772 netif_stop_queue(dev);
7773
7774 /* copy data to device buffers */
7775 info->xmit_cnt = skb->len;
7776 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7777
7778 /* update network statistics */
7779 stats->tx_packets++;
7780 stats->tx_bytes += skb->len;
7781
7782 /* done with socket buffer, so free it */
7783 dev_kfree_skb(skb);
7784
7785 /* save start time for transmit timeout detection */
7786 dev->trans_start = jiffies;
7787
7788 /* start hardware transmitter if necessary */
7789 spin_lock_irqsave(&info->irq_spinlock,flags);
7790 if (!info->tx_active)
7791 usc_start_transmitter(info);
7792 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7793
7794 return 0;
7795}
7796
7797/**
7798 * called by network layer when interface enabled
7799 * claim resources and initialize hardware
7800 *
7801 * dev pointer to network device structure
7802 *
7803 * returns 0 if success, otherwise error code
7804 */
7805static int hdlcdev_open(struct net_device *dev)
7806{
7807 struct mgsl_struct *info = dev_to_port(dev);
7808 int rc;
7809 unsigned long flags;
7810
7811 if (debug_level >= DEBUG_LEVEL_INFO)
7812 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7813
7814 /* generic HDLC layer open processing */
7815 if ((rc = hdlc_open(dev)))
7816 return rc;
7817
7818 /* arbitrate between network and tty opens */
7819 spin_lock_irqsave(&info->netlock, flags);
7820 if (info->count != 0 || info->netcount != 0) {
7821 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7822 spin_unlock_irqrestore(&info->netlock, flags);
7823 return -EBUSY;
7824 }
7825 info->netcount=1;
7826 spin_unlock_irqrestore(&info->netlock, flags);
7827
7828 /* claim resources and init adapter */
7829 if ((rc = startup(info)) != 0) {
7830 spin_lock_irqsave(&info->netlock, flags);
7831 info->netcount=0;
7832 spin_unlock_irqrestore(&info->netlock, flags);
7833 return rc;
7834 }
7835
7836 /* assert DTR and RTS, apply hardware settings */
7837 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7838 mgsl_program_hw(info);
7839
7840 /* enable network layer transmit */
7841 dev->trans_start = jiffies;
7842 netif_start_queue(dev);
7843
7844 /* inform generic HDLC layer of current DCD status */
7845 spin_lock_irqsave(&info->irq_spinlock, flags);
7846 usc_get_serial_signals(info);
7847 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7848	if (info->serial_signals & SerialSignal_DCD)
7849 netif_carrier_on(dev);
7850 else
7851 netif_carrier_off(dev);
7852	return 0;
7853}
7854
7855/**
7856 * called by network layer when interface is disabled
7857 * shutdown hardware and release resources
7858 *
7859 * dev pointer to network device structure
7860 *
7861 * returns 0 if success, otherwise error code
7862 */
7863static int hdlcdev_close(struct net_device *dev)
7864{
7865 struct mgsl_struct *info = dev_to_port(dev);
7866 unsigned long flags;
7867
7868 if (debug_level >= DEBUG_LEVEL_INFO)
7869 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7870
7871 netif_stop_queue(dev);
7872
7873 /* shutdown adapter and release resources */
7874 shutdown(info);
7875
7876 hdlc_close(dev);
7877
7878 spin_lock_irqsave(&info->netlock, flags);
7879 info->netcount=0;
7880 spin_unlock_irqrestore(&info->netlock, flags);
7881
7882 return 0;
7883}
7884
7885/**
7886 * called by network layer to process IOCTL call to network device
7887 *
7888 * dev pointer to network device structure
7889 * ifr pointer to network interface request structure
7890 * cmd IOCTL command code
7891 *
7892 * returns 0 if success, otherwise error code
7893 */
7894static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7895{
7896 const size_t size = sizeof(sync_serial_settings);
7897 sync_serial_settings new_line;
7898 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7899 struct mgsl_struct *info = dev_to_port(dev);
7900 unsigned int flags;
7901
7902 if (debug_level >= DEBUG_LEVEL_INFO)
7903 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7904
7905 /* return error if TTY interface open */
7906 if (info->count)
7907 return -EBUSY;
7908
7909 if (cmd != SIOCWANDEV)
7910 return hdlc_ioctl(dev, ifr, cmd);
7911
7912 switch(ifr->ifr_settings.type) {
7913 case IF_GET_IFACE: /* return current sync_serial_settings */
7914
7915 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7916 if (ifr->ifr_settings.size < size) {
7917 ifr->ifr_settings.size = size; /* data size wanted */
7918 return -ENOBUFS;
7919 }
7920
7921 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7922 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7923 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7924 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7925
7926 switch (flags){
7927 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7928 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7929 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7930 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7931 default: new_line.clock_type = CLOCK_DEFAULT;
7932 }
7933
7934 new_line.clock_rate = info->params.clock_speed;
7935 new_line.loopback = info->params.loopback ? 1:0;
7936
7937 if (copy_to_user(line, &new_line, size))
7938 return -EFAULT;
7939 return 0;
7940
7941 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7942
7943 if(!capable(CAP_NET_ADMIN))
7944 return -EPERM;
7945 if (copy_from_user(&new_line, line, size))
7946 return -EFAULT;
7947
7948 switch (new_line.clock_type)
7949 {
7950 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7951 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7952 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7953 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7954 case CLOCK_DEFAULT: flags = info->params.flags &
7955 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7956 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7957 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7958 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7959 default: return -EINVAL;
7960 }
7961
7962 if (new_line.loopback != 0 && new_line.loopback != 1)
7963 return -EINVAL;
7964
7965 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7966 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7967 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7968 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7969 info->params.flags |= flags;
7970
7971 info->params.loopback = new_line.loopback;
7972
7973 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7974 info->params.clock_speed = new_line.clock_rate;
7975 else
7976 info->params.clock_speed = 0;
7977
7978 /* if network interface up, reprogram hardware */
7979 if (info->netcount)
7980 mgsl_program_hw(info);
7981 return 0;
7982
7983 default:
7984 return hdlc_ioctl(dev, ifr, cmd);
7985 }
7986}
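/*
 * Typical use (assumed userspace example): a utility such as sethdlc
 * issues SIOCWANDEV with type IF_IFACE_SYNC_SERIAL to request, for
 * instance, internal clocking at 9600 bps (clock_type CLOCK_INT,
 * clock_rate 9600); the handler above maps that onto the driver's
 * HDLC_FLAG_*_BRG flags and params.clock_speed.
 */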
7987
7988/**
7989 * called by network layer when transmit timeout is detected
7990 *
7991 * dev pointer to network device structure
7992 */
7993static void hdlcdev_tx_timeout(struct net_device *dev)
7994{
7995 struct mgsl_struct *info = dev_to_port(dev);
7996 struct net_device_stats *stats = hdlc_stats(dev);
7997 unsigned long flags;
7998
7999 if (debug_level >= DEBUG_LEVEL_INFO)
8000 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8001
8002 stats->tx_errors++;
8003 stats->tx_aborted_errors++;
8004
8005 spin_lock_irqsave(&info->irq_spinlock,flags);
8006 usc_stop_transmitter(info);
8007 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8008
8009 netif_wake_queue(dev);
8010}
8011
8012/**
8013 * called by device driver when transmit completes
8014 * reenable network layer transmit if stopped
8015 *
8016 * info pointer to device instance information
8017 */
8018static void hdlcdev_tx_done(struct mgsl_struct *info)
8019{
8020 if (netif_queue_stopped(info->netdev))
8021 netif_wake_queue(info->netdev);
8022}
8023
8024/**
8025 * called by device driver when frame received
8026 * pass frame to network layer
8027 *
8028 * info pointer to device instance information
8029 * buf  pointer to buffer containing frame data
8030 * size count of data bytes in buf
8031 */
8032static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8033{
8034 struct sk_buff *skb = dev_alloc_skb(size);
8035 struct net_device *dev = info->netdev;
8036 struct net_device_stats *stats = hdlc_stats(dev);
8037
8038 if (debug_level >= DEBUG_LEVEL_INFO)
8039 printk("hdlcdev_rx(%s)\n",dev->name);
8040
8041 if (skb == NULL) {
8042 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8043 stats->rx_dropped++;
8044 return;
8045 }
8046
8047 memcpy(skb_put(skb, size),buf,size);
8048
8049 skb->protocol = hdlc_type_trans(skb, info->netdev);
8050
8051 stats->rx_packets++;
8052 stats->rx_bytes += size;
8053
8054 netif_rx(skb);
8055
8056 info->netdev->last_rx = jiffies;
8057}
8058
8059/**
8060 * called by device driver when adding device instance
8061 * do generic HDLC initialization
8062 *
8063 * info pointer to device instance information
8064 *
8065 * returns 0 if success, otherwise error code
8066 */
8067static int hdlcdev_init(struct mgsl_struct *info)
8068{
8069 int rc;
8070 struct net_device *dev;
8071 hdlc_device *hdlc;
8072
8073 /* allocate and initialize network and HDLC layer objects */
8074
8075 if (!(dev = alloc_hdlcdev(info))) {
8076 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8077 return -ENOMEM;
8078 }
8079
8080 /* for network layer reporting purposes only */
8081 dev->base_addr = info->io_base;
8082 dev->irq = info->irq_level;
8083 dev->dma = info->dma_level;
8084
8085 /* network layer callbacks and settings */
8086 dev->do_ioctl = hdlcdev_ioctl;
8087 dev->open = hdlcdev_open;
8088 dev->stop = hdlcdev_close;
8089 dev->tx_timeout = hdlcdev_tx_timeout;
8090 dev->watchdog_timeo = 10*HZ;
8091 dev->tx_queue_len = 50;
8092
8093 /* generic HDLC layer callbacks and settings */
8094 hdlc = dev_to_hdlc(dev);
8095 hdlc->attach = hdlcdev_attach;
8096 hdlc->xmit = hdlcdev_xmit;
8097
8098 /* register objects with HDLC layer */
8099 if ((rc = register_hdlc_device(dev))) {
8100 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8101 free_netdev(dev);
8102 return rc;
8103 }
8104
8105 info->netdev = dev;
8106 return 0;
8107}
8108
8109/**
8110 * called by device driver when removing device instance
8111 * do generic HDLC cleanup
8112 *
8113 * info pointer to device instance information
8114 */
8115static void hdlcdev_exit(struct mgsl_struct *info)
8116{
8117 unregister_hdlc_device(info->netdev);
8118 free_netdev(info->netdev);
8119 info->netdev = NULL;
8120}
8121
8122#endif /* SYNCLINK_GENERIC_HDLC */
8123
8124
8125static int __devinit synclink_init_one (struct pci_dev *dev,
8126 const struct pci_device_id *ent)
8127{
8128 struct mgsl_struct *info;
8129
8130 if (pci_enable_device(dev)) {
8131 printk("error enabling pci device %p\n", dev);
8132 return -EIO;
8133 }
8134
8135 if (!(info = mgsl_allocate_device())) {
8136 printk("can't allocate device instance data.\n");
8137 return -EIO;
8138 }
8139
8140 /* Copy user configuration info to device instance data */
8141
8142 info->io_base = pci_resource_start(dev, 2);
8143 info->irq_level = dev->irq;
8144 info->phys_memory_base = pci_resource_start(dev, 3);
8145
8146	/* Because ioremap only works on page boundaries we must map
8147 * a larger area than is actually implemented for the LCR
8148 * memory range. We map a full page starting at the page boundary.
8149 */
8150 info->phys_lcr_base = pci_resource_start(dev, 0);
8151 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8152 info->phys_lcr_base &= ~(PAGE_SIZE-1);
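	/* Example (illustrative, assuming 4K pages): a BAR 0 value of
	 * 0xEF001080 yields lcr_offset = 0x080 and a page aligned
	 * phys_lcr_base of 0xEF001000, which is the address that gets
	 * mapped.
	 */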
8153
8154 info->bus_type = MGSL_BUS_TYPE_PCI;
8155 info->io_addr_size = 8;
8156	info->irq_flags = IRQF_SHARED;
8157
8158 if (dev->device == 0x0210) {
8159 /* Version 1 PCI9030 based universal PCI adapter */
8160 info->misc_ctrl_value = 0x007c4080;
8161 info->hw_version = 1;
8162 } else {
8163 /* Version 0 PCI9050 based 5V PCI adapter
8164 * A PCI9050 bug prevents reading LCR registers if
8165 * LCR base address bit 7 is set. Maintain shadow
8166 * value so we can write to LCR misc control reg.
8167 */
8168 info->misc_ctrl_value = 0x087e4546;
8169 info->hw_version = 0;
8170 }
8171
8172 mgsl_add_device(info);
8173
8174 return 0;
8175}
8176
8177static void __devexit synclink_remove_one (struct pci_dev *dev)
8178{
8179}
8180