/*
 * Device driver for Microgate SyncLink GT serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * This code is released under the GNU General Public License (GPL)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DEBUG OUTPUT DEFINITIONS
 *
 * uncomment lines below to enable specific types of debug output
 *
 * DBGINFO   information - most verbose output
 * DBGERR    serious errors
 * DBGBH     bottom half service routine debugging
 * DBGISR    interrupt service routine debugging
 * DBGDATA   output receive and transmit data
 * DBGTBUF   output transmit DMA buffers and registers
 * DBGRBUF   output receive DMA buffers and registers
 */

#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
/*#define DBGTBUF(info) dump_tbufs(info)*/
/*#define DBGRBUF(info) dump_rbufs(info)*/
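
/*
 * Illustrative note (not part of the original source): debug output is
 * gated at runtime by the debug_level module parameter declared further
 * below, so verbose tracing can be enabled at load time, e.g.
 *
 *   modprobe synclink_gt debug_level=5
 *
 * The DEBUG_LEVEL_* constants used above come from <linux/synclink.h>.
 */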


#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
#include <linux/termios.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/synclink.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/types.h>
#include <asm/uaccess.h>

#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

/*
 * module identification
 */
static char *driver_name = "SyncLink GT";
static char *tty_driver_name = "synclink_gt";
static char *tty_dev_prefix = "ttySLG";
MODULE_LICENSE("GPL");
#define MGSL_MAGIC 0x5401
#define MAX_DEVICES 32

static struct pci_device_id pci_table[] = {
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_table);

static int init_one(struct pci_dev *dev, const struct pci_device_id *ent);
static void remove_one(struct pci_dev *dev);
static struct pci_driver pci_driver = {
	.name     = "synclink_gt",
	.id_table = pci_table,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};

static bool pci_registered;

/*
 * module configuration and status
 */
static struct slgt_info *slgt_device_list;
static int slgt_device_count;

static int ttymajor;
static int debug_level;
static int maxframe[MAX_DEVICES];

module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);

MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
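
/*
 * Illustrative example (assumed invocation, not part of the original
 * source): the parameters above may be supplied at load time, with
 * maxframe taking a comma-separated list of one value per device, e.g.
 *
 *   modprobe synclink_gt debug_level=1 maxframe=4096,65535
 */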

/*
 * tty support and callbacks
 */
static struct tty_driver *serial_driver;

static int open(struct tty_struct *tty, struct file * filp);
static void close(struct tty_struct *tty, struct file * filp);
static void hangup(struct tty_struct *tty);
static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);

static int write(struct tty_struct *tty, const unsigned char *buf, int count);
static int put_char(struct tty_struct *tty, unsigned char ch);
static void send_xchar(struct tty_struct *tty, char ch);
static void wait_until_sent(struct tty_struct *tty, int timeout);
static int write_room(struct tty_struct *tty);
static void flush_chars(struct tty_struct *tty);
static void flush_buffer(struct tty_struct *tty);
static void tx_hold(struct tty_struct *tty);
static void tx_release(struct tty_struct *tty);

static int ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg);
static int chars_in_buffer(struct tty_struct *tty);
static void throttle(struct tty_struct * tty);
static void unthrottle(struct tty_struct * tty);
static int set_break(struct tty_struct *tty, int break_state);

/*
 * generic HDLC support and callbacks
 */
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct slgt_info *info);
static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
static int  hdlcdev_init(struct slgt_info *info);
static void hdlcdev_exit(struct slgt_info *info);
#endif


/*
 * device specific structures, macros and functions
 */

#define SLGT_MAX_PORTS 4
#define SLGT_REG_SIZE  256

/*
 * conditional wait facility
 */
struct cond_wait {
	struct cond_wait *next;
	wait_queue_head_t q;
	wait_queue_t wait;
	unsigned int data;
};
static void init_cond_wait(struct cond_wait *w, unsigned int data);
static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
static void flush_cond_wait(struct cond_wait **head);

/*
 * DMA buffer descriptor and access macros
 */
struct slgt_desc
{
	__le16 count;
	__le16 status;
	__le32 pbuf;  /* physical address of data buffer */
	__le32 next;  /* physical address of next descriptor */

	/* driver book keeping */
	char *buf;          /* virtual  address of data buffer */
	unsigned int pdesc; /* physical address of this descriptor */
	dma_addr_t buf_dma_addr;
	unsigned short buf_count;
};

#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
#define set_desc_count(a,b) (a).count = cpu_to_le16((unsigned short)(b))
#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
#define desc_count(a)      (le16_to_cpu((a).count))
#define desc_status(a)     (le16_to_cpu((a).status))
#define desc_complete(a)   (le16_to_cpu((a).status) & BIT15)
#define desc_eof(a)        (le16_to_cpu((a).status) & BIT2)
#define desc_crc_error(a)  (le16_to_cpu((a).status) & BIT1)
#define desc_abort(a)      (le16_to_cpu((a).status) & BIT0)
#define desc_residue(a)    ((le16_to_cpu((a).status) & 0x38) >> 3)
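
/*
 * Illustrative sketch only (not used elsewhere in this driver): the
 * access macros above take a descriptor by value, so a completed
 * end-of-frame receive descriptor can be recognized like this.
 */
static inline bool desc_is_frame_end(struct slgt_desc *d)
{
	/* BIT15 = descriptor complete, BIT2 = end of frame */
	return desc_complete(*d) && desc_eof(*d);
}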

struct _input_signal_events {
	int ri_up;
	int ri_down;
	int dsr_up;
	int dsr_down;
	int dcd_up;
	int dcd_down;
	int cts_up;
	int cts_down;
};

/*
 * device instance data structure
 */
struct slgt_info {
	void *if_ptr; /* General purpose pointer (used by SPPP) */
	struct tty_port port;

	struct slgt_info *next_device; /* device list link */

	int magic;

	char device_name[25];
	struct pci_dev *pdev;

	int port_count;  /* count of ports on adapter */
	int adapter_num; /* adapter instance number */
	int port_num;    /* port instance number */

	/* array of pointers to port contexts on this adapter */
	struct slgt_info *port_array[SLGT_MAX_PORTS];

	int line; /* tty line instance number */

	struct mgsl_icount icount;

	int timeout;
	int x_char; /* xon/xoff character */
	unsigned int read_status_mask;
	unsigned int ignore_status_mask;

	wait_queue_head_t status_event_wait_q;
	wait_queue_head_t event_wait_q;
	struct timer_list tx_timer;
	struct timer_list rx_timer;

	unsigned int gpio_present;
	struct cond_wait *gpio_wait_q;

	spinlock_t lock; /* spinlock for synchronizing with ISR */

	struct work_struct task;
	u32 pending_bh;
	bool bh_requested;
	bool bh_running;

	int isr_overflow;
	bool irq_requested; /* true if IRQ requested */
	bool irq_occurred;  /* for diagnostics use */

	/* device configuration */

	unsigned int bus_type;
	unsigned int irq_level;
	unsigned long irq_flags;

	unsigned char __iomem * reg_addr; /* memory mapped registers address */
	u32 phys_reg_addr;
	bool reg_addr_requested;

	MGSL_PARAMS params; /* communications parameters */
	u32 idle_mode;
	u32 max_frame_size; /* as set by device config */

	unsigned int rbuf_fill_level;
	unsigned int rx_pio;
	unsigned int if_mode;
	unsigned int base_clock;

	/* device status */

	bool rx_enabled;
	bool rx_restart;

	bool tx_enabled;
	bool tx_active;

	unsigned char signals; /* serial signal states */
	int init_error;        /* initialization error */

	unsigned char *tx_buf;
	int tx_count;

	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	bool drop_rts_on_tx_done;
	struct _input_signal_events input_signal_events;

	int dcd_chkcount; /* check counts to prevent */
	int cts_chkcount; /* too many IRQs if a signal */
	int dsr_chkcount; /* is floating */
	int ri_chkcount;

	char *bufs;               /* virtual address of DMA buffer lists */
	dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */

	unsigned int rbuf_count;
	struct slgt_desc *rbufs;
	unsigned int rbuf_current;
	unsigned int rbuf_index;
	unsigned int rbuf_fill_index;
	unsigned short rbuf_fill_count;

	unsigned int tbuf_count;
	struct slgt_desc *tbufs;
	unsigned int tbuf_current;
	unsigned int tbuf_start;

	unsigned char *tmp_rbuf;
	unsigned int tmp_rbuf_count;

	/* SPPP/Cisco HDLC device parts */

	int netcount;
	spinlock_t netlock;
#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif

};

static MGSL_PARAMS default_params = {
	.mode            = MGSL_MODE_HDLC,
	.loopback        = 0,
	.flags           = HDLC_FLAG_UNDERRUN_ABORT15,
	.encoding        = HDLC_ENCODING_NRZI_SPACE,
	.clock_speed     = 0,
	.addr_filter     = 0xff,
	.crc_type        = HDLC_CRC_16_CCITT,
	.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
	.preamble        = HDLC_PREAMBLE_PATTERN_NONE,
	.data_rate       = 9600,
	.data_bits       = 8,
	.stop_bits       = 1,
	.parity          = ASYNC_PARITY_NONE
};


#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4
#define IO_PIN_SHUTDOWN_LIMIT 100

#define DMABUFSIZE 256
#define DESC_LIST_SIZE 4096

#define MASK_PARITY  BIT1
#define MASK_FRAMING BIT0
#define MASK_BREAK   BIT14
#define MASK_OVERRUN BIT4

#define GSR   0x00 /* global status */
#define JCR   0x04 /* JTAG control */
#define IODR  0x08 /* GPIO direction */
#define IOER  0x0c /* GPIO interrupt enable */
#define IOVR  0x10 /* GPIO value */
#define IOSR  0x14 /* GPIO interrupt status */
#define TDR   0x80 /* tx data */
#define RDR   0x80 /* rx data */
#define TCR   0x82 /* tx control */
#define TIR   0x84 /* tx idle */
#define TPR   0x85 /* tx preamble */
#define RCR   0x86 /* rx control */
#define VCR   0x88 /* V.24 control */
#define CCR   0x89 /* clock control */
#define BDR   0x8a /* baud divisor */
#define SCR   0x8c /* serial control */
#define SSR   0x8e /* serial status */
#define RDCSR 0x90 /* rx DMA control/status */
#define TDCSR 0x94 /* tx DMA control/status */
#define RDDAR 0x98 /* rx DMA descriptor address */
#define TDDAR 0x9c /* tx DMA descriptor address */

#define RXIDLE      BIT14
#define RXBREAK     BIT14
#define IRQ_TXDATA  BIT13
#define IRQ_TXIDLE  BIT12
#define IRQ_TXUNDER BIT11 /* HDLC */
#define IRQ_RXDATA  BIT10
#define IRQ_RXIDLE  BIT9  /* HDLC */
#define IRQ_RXBREAK BIT9  /* async */
#define IRQ_RXOVER  BIT8
#define IRQ_DSR     BIT7
#define IRQ_CTS     BIT6
#define IRQ_DCD     BIT5
#define IRQ_RI      BIT4
#define IRQ_ALL     0x3ff0
#define IRQ_MASTER  BIT0

#define slgt_irq_on(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
#define slgt_irq_off(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
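
/*
 * Illustrative example (not part of the original source): the macros
 * above set or clear interrupt enable bits in the serial control
 * register (SCR) without disturbing its remaining bits, e.g.
 *
 *   slgt_irq_on(info, IRQ_DSR + IRQ_CTS + IRQ_DCD + IRQ_RI);
 */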

static __u8  rd_reg8(struct slgt_info *info, unsigned int addr);
static void  wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
static void  wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
static void  wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);

static void msc_set_vcr(struct slgt_info *info);

static int  startup(struct slgt_info *info);
static int  block_til_ready(struct tty_struct *tty, struct file * filp, struct slgt_info *info);
static void shutdown(struct slgt_info *info);
static void program_hw(struct slgt_info *info);
static void change_params(struct slgt_info *info);

static int  register_test(struct slgt_info *info);
static int  irq_test(struct slgt_info *info);
static int  loopback_test(struct slgt_info *info);
static int  adapter_test(struct slgt_info *info);

static void reset_adapter(struct slgt_info *info);
static void reset_port(struct slgt_info *info);
static void async_mode(struct slgt_info *info);
static void sync_mode(struct slgt_info *info);

static void rx_stop(struct slgt_info *info);
static void rx_start(struct slgt_info *info);
static void reset_rbufs(struct slgt_info *info);
static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
static void rdma_reset(struct slgt_info *info);
static bool rx_get_frame(struct slgt_info *info);
static bool rx_get_buf(struct slgt_info *info);

static void tx_start(struct slgt_info *info);
static void tx_stop(struct slgt_info *info);
static void tx_set_idle(struct slgt_info *info);
static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);

static void get_signals(struct slgt_info *info);
static void set_signals(struct slgt_info *info);
static void enable_loopback(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);

static int  bh_action(struct slgt_info *info);
static void bh_handler(struct work_struct *work);
static void bh_transmit(struct slgt_info *info);
static void isr_serial(struct slgt_info *info);
static void isr_rdma(struct slgt_info *info);
static void isr_txeom(struct slgt_info *info, unsigned short status);
static void isr_tdma(struct slgt_info *info);

static int  alloc_dma_bufs(struct slgt_info *info);
static void free_dma_bufs(struct slgt_info *info);
static int  alloc_desc(struct slgt_info *info);
static void free_desc(struct slgt_info *info);
static int  alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);

static int  alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);

static void tx_timeout(unsigned long context);
static void rx_timeout(unsigned long context);

/*
 * ioctl handlers
 */
static int  get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
static int  get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int  set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int  get_txidle(struct slgt_info *info, int __user *idle_mode);
static int  set_txidle(struct slgt_info *info, int idle_mode);
static int  tx_enable(struct slgt_info *info, int enable);
static int  tx_abort(struct slgt_info *info);
static int  rx_enable(struct slgt_info *info, int enable);
static int  modem_input_wait(struct slgt_info *info, int arg);
static int  wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
static int  tiocmget(struct tty_struct *tty, struct file *file);
static int  tiocmset(struct tty_struct *tty, struct file *file,
		     unsigned int set, unsigned int clear);
static int set_break(struct tty_struct *tty, int break_state);
static int  get_interface(struct slgt_info *info, int __user *if_mode);
static int  set_interface(struct slgt_info *info, int if_mode);
static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);

/*
 * driver functions
 */
static void add_device(struct slgt_info *info);
static void device_init(int adapter_num, struct pci_dev *pdev);
static int  claim_resources(struct slgt_info *info);
static void release_resources(struct slgt_info *info);

/*
 * DEBUG OUTPUT CODE
 */
#ifndef DBGINFO
#define DBGINFO(fmt)
#endif
#ifndef DBGERR
#define DBGERR(fmt)
#endif
#ifndef DBGBH
#define DBGBH(fmt)
#endif
#ifndef DBGISR
#define DBGISR(fmt)
#endif

#ifdef DBGDATA
static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
{
	int i;
	int linecount;
	printk("%s %s data:\n", info->device_name, label);
	while(count) {
		linecount = (count > 16) ? 16 : count;
		for(i=0; i < linecount; i++)
			printk("%02X ", (unsigned char)data[i]);
		for(; i < 17; i++)
			printk("   ");
		for(i=0; i < linecount; i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c", data[i]);
			else
				printk(".");
		}
		printk("\n");
		data  += linecount;
		count -= linecount;
	}
}
#else
#define DBGDATA(info, buf, size, label)
#endif

#ifdef DBGTBUF
static void dump_tbufs(struct slgt_info *info)
{
	int i;
	printk("tbuf_current=%d\n", info->tbuf_current);
	for (i=0 ; i < info->tbuf_count ; i++) {
		printk("%d: count=%04X status=%04X\n",
			i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
	}
}
#else
#define DBGTBUF(info)
#endif

#ifdef DBGRBUF
static void dump_rbufs(struct slgt_info *info)
{
	int i;
	printk("rbuf_current=%d\n", info->rbuf_current);
	for (i=0 ; i < info->rbuf_count ; i++) {
		printk("%d: count=%04X status=%04X\n",
			i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
	}
}
#else
#define DBGRBUF(info)
#endif

static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
{
#ifdef SANITY_CHECK
	if (!info) {
		printk("null struct slgt_info for (%s) in %s\n", devname, name);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* tty callbacks */

static int open(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info;
	int retval, line;
	unsigned long flags;

	line = tty->index;
	if ((line < 0) || (line >= slgt_device_count)) {
		DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
		return -ENODEV;
	}

	info = slgt_device_list;
	while(info && info->line != line)
		info = info->next_device;
	if (sanity_check(info, tty->name, "open"))
		return -ENODEV;
	if (info->init_error) {
		DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
		return -ENODEV;
	}

	tty->driver_data = info;
	info->port.tty = tty;

	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));

	/* If port is closing, signal caller to try again */
	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
		if (info->port.flags & ASYNC_CLOSING)
			interruptible_sleep_on(&info->port.close_wait);
		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
		goto cleanup;
	}

	mutex_lock(&info->port.mutex);
	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		mutex_unlock(&info->port.mutex);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0) {
			mutex_unlock(&info->port.mutex);
			goto cleanup;
		}
	}
	mutex_unlock(&info->port.mutex);
	retval = block_til_ready(tty, filp, info);
	if (retval) {
		DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
		goto cleanup;
	}

	retval = 0;

cleanup:
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if(info->port.count)
			info->port.count--;
	}

	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
	return retval;
}

static void close(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info = tty->driver_data;

	if (sanity_check(info, tty->name, "close"))
		return;
	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));

	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
	if (info->port.flags & ASYNC_INITIALIZED)
		wait_until_sent(tty, info->timeout);
	flush_buffer(tty);
	tty_ldisc_flush(tty);

	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
}

static void hangup(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "hangup"))
		return;
	DBGINFO(("%s hangup\n", info->device_name));

	flush_buffer(tty);

	mutex_lock(&info->port.mutex);
	shutdown(info);

	spin_lock_irqsave(&info->port.lock, flags);
	info->port.count = 0;
	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;
	spin_unlock_irqrestore(&info->port.lock, flags);
	mutex_unlock(&info->port.mutex);

	wake_up_interruptible(&info->port.open_wait);
}

static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	DBGINFO(("%s set_termios\n", tty->driver->name));

	change_params(info);

	/* Handle transition to B0 status */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios->c_cflag & CBAUD)) {
		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle transition away from B0 status */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios->c_cflag & CBAUD) {
		info->signals |= SerialSignal_DTR;
		if (!(tty->termios->c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->signals |= SerialSignal_RTS;
		}
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle turning off CRTSCTS */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		tx_release(tty);
	}
}

static void update_tx_timer(struct slgt_info *info)
{
	/*
	 * use worst case speed of 1200bps to calculate transmit timeout
	 * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
	 */
	if (info->params.mode == MGSL_MODE_HDLC) {
		int timeout = (tbuf_bytes(info) * 7) + 1000;
		mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
	}
}

static int write(struct tty_struct *tty,
		 const unsigned char *buf, int count)
{
	int ret = 0;
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "write"))
		return -EIO;

	DBGINFO(("%s write count=%d\n", info->device_name, count));

	if (!info->tx_buf || (count > info->max_frame_size))
		return -EIO;

	if (!count || tty->stopped || tty->hw_stopped)
		return 0;

	spin_lock_irqsave(&info->lock, flags);

	if (info->tx_count) {
		/* send accumulated data from send_char() */
		if (!tx_load(info, info->tx_buf, info->tx_count))
			goto cleanup;
		info->tx_count = 0;
	}

	if (tx_load(info, buf, count))
		ret = count;

cleanup:
	spin_unlock_irqrestore(&info->lock, flags);
	DBGINFO(("%s write rc=%d\n", info->device_name, ret));
	return ret;
}

static int put_char(struct tty_struct *tty, unsigned char ch)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;

	if (sanity_check(info, tty->name, "put_char"))
		return 0;
	DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
	if (!info->tx_buf)
		return 0;
	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_count < info->max_frame_size) {
		info->tx_buf[info->tx_count++] = ch;
		ret = 1;
	}
	spin_unlock_irqrestore(&info->lock,flags);
	return ret;
}

static void send_xchar(struct tty_struct *tty, char ch)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "send_xchar"))
		return;
	DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
	info->x_char = ch;
	if (ch) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_enabled)
			tx_start(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

static void wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info)
		return;
	if (sanity_check(info, tty->name, "wait_until_sent"))
		return;
	DBGINFO(("%s wait_until_sent entry\n", info->device_name));
	if (!(info->port.flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if (info->params.data_rate) {
		char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	while (info->tx_active) {
		msleep_interruptible(jiffies_to_msecs(char_time));
		if (signal_pending(current))
			break;
		if (timeout && time_after(jiffies, orig_jiffies + timeout))
			break;
	}
exit:
	DBGINFO(("%s wait_until_sent exit\n", info->device_name));
}

static int write_room(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	int ret;

	if (sanity_check(info, tty->name, "write_room"))
		return 0;
	ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
	DBGINFO(("%s write_room=%d\n", info->device_name, ret));
	return ret;
}

static void flush_chars(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "flush_chars"))
		return;
	DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));

	if (info->tx_count <= 0 || tty->stopped ||
	    tty->hw_stopped || !info->tx_buf)
		return;

	DBGINFO(("%s flush_chars start transmit\n", info->device_name));

	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
		info->tx_count = 0;
	spin_unlock_irqrestore(&info->lock,flags);
}

static void flush_buffer(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "flush_buffer"))
		return;
	DBGINFO(("%s flush_buffer\n", info->device_name));

	spin_lock_irqsave(&info->lock, flags);
	info->tx_count = 0;
	spin_unlock_irqrestore(&info->lock, flags);

	tty_wakeup(tty);
}

/*
 * throttle (stop) transmitter
 */
static void tx_hold(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_hold"))
		return;
	DBGINFO(("%s tx_hold\n", info->device_name));
	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
		tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/*
 * release (start) transmitter
 */
static void tx_release(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_release"))
		return;
	DBGINFO(("%s tx_release\n", info->device_name));
	spin_lock_irqsave(&info->lock, flags);
	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
		info->tx_count = 0;
	spin_unlock_irqrestore(&info->lock, flags);
}

/*
 * Service an IOCTL request
 *
 * Arguments
 *
 *	tty	pointer to tty instance data
 *	file	pointer to associated file object for device
 *	cmd	IOCTL command code
 *	arg	command argument/context
 *
 * Return 0 if success, otherwise error code
 */
static int ioctl(struct tty_struct *tty, struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	struct slgt_info *info = tty->driver_data;
	void __user *argp = (void __user *)arg;
	int ret;

	if (sanity_check(info, tty->name, "ioctl"))
		return -ENODEV;
	DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));

	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
			return -EIO;
	}

	switch (cmd) {
	case MGSL_IOCWAITEVENT:
		return wait_mgsl_event(info, argp);
	case TIOCMIWAIT:
		return modem_input_wait(info,(int)arg);
	case MGSL_IOCSGPIO:
		return set_gpio(info, argp);
	case MGSL_IOCGGPIO:
		return get_gpio(info, argp);
	case MGSL_IOCWAITGPIO:
		return wait_gpio(info, argp);
	}
	mutex_lock(&info->port.mutex);
	switch (cmd) {
	case MGSL_IOCGPARAMS:
		ret = get_params(info, argp);
		break;
	case MGSL_IOCSPARAMS:
		ret = set_params(info, argp);
		break;
	case MGSL_IOCGTXIDLE:
		ret = get_txidle(info, argp);
		break;
	case MGSL_IOCSTXIDLE:
		ret = set_txidle(info, (int)arg);
		break;
	case MGSL_IOCTXENABLE:
		ret = tx_enable(info, (int)arg);
		break;
	case MGSL_IOCRXENABLE:
		ret = rx_enable(info, (int)arg);
		break;
	case MGSL_IOCTXABORT:
		ret = tx_abort(info);
		break;
	case MGSL_IOCGSTATS:
		ret = get_stats(info, argp);
		break;
	case MGSL_IOCGIF:
		ret = get_interface(info, argp);
		break;
	case MGSL_IOCSIF:
		ret = set_interface(info,(int)arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	mutex_unlock(&info->port.mutex);
	return ret;
}

static int get_icount(struct tty_struct *tty,
		      struct serial_icounter_struct *icount)
{
	struct slgt_info *info = tty->driver_data;
	struct mgsl_icount cnow; /* kernel counter temps */
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	cnow = info->icount;
	spin_unlock_irqrestore(&info->lock,flags);

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;

	return 0;
}

/*
 * support for 32 bit ioctl calls on 64 bit systems
 */
#ifdef CONFIG_COMPAT
static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
{
	struct MGSL_PARAMS32 tmp_params;

	DBGINFO(("%s get_params32\n", info->device_name));
	tmp_params.mode = (compat_ulong_t)info->params.mode;
	tmp_params.loopback = info->params.loopback;
	tmp_params.flags = info->params.flags;
	tmp_params.encoding = info->params.encoding;
	tmp_params.clock_speed = (compat_ulong_t)info->params.clock_speed;
	tmp_params.addr_filter = info->params.addr_filter;
	tmp_params.crc_type = info->params.crc_type;
	tmp_params.preamble_length = info->params.preamble_length;
	tmp_params.preamble = info->params.preamble;
	tmp_params.data_rate = (compat_ulong_t)info->params.data_rate;
	tmp_params.data_bits = info->params.data_bits;
	tmp_params.stop_bits = info->params.stop_bits;
	tmp_params.parity = info->params.parity;
	if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
		return -EFAULT;
	return 0;
}

static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
{
	struct MGSL_PARAMS32 tmp_params;

	DBGINFO(("%s set_params32\n", info->device_name));
	if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
		return -EFAULT;

	spin_lock(&info->lock);
	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
		info->base_clock = tmp_params.clock_speed;
	} else {
		info->params.mode = tmp_params.mode;
		info->params.loopback = tmp_params.loopback;
		info->params.flags = tmp_params.flags;
		info->params.encoding = tmp_params.encoding;
		info->params.clock_speed = tmp_params.clock_speed;
		info->params.addr_filter = tmp_params.addr_filter;
		info->params.crc_type = tmp_params.crc_type;
		info->params.preamble_length = tmp_params.preamble_length;
		info->params.preamble = tmp_params.preamble;
		info->params.data_rate = tmp_params.data_rate;
		info->params.data_bits = tmp_params.data_bits;
		info->params.stop_bits = tmp_params.stop_bits;
		info->params.parity = tmp_params.parity;
	}
	spin_unlock(&info->lock);

	program_hw(info);

	return 0;
}

static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct slgt_info *info = tty->driver_data;
	int rc = -ENOIOCTLCMD;

	if (sanity_check(info, tty->name, "compat_ioctl"))
		return -ENODEV;
	DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));

	switch (cmd) {

	case MGSL_IOCSPARAMS32:
		rc = set_params32(info, compat_ptr(arg));
		break;

	case MGSL_IOCGPARAMS32:
		rc = get_params32(info, compat_ptr(arg));
		break;

	case MGSL_IOCGPARAMS:
	case MGSL_IOCSPARAMS:
	case MGSL_IOCGTXIDLE:
	case MGSL_IOCGSTATS:
	case MGSL_IOCWAITEVENT:
	case MGSL_IOCGIF:
	case MGSL_IOCSGPIO:
	case MGSL_IOCGGPIO:
	case MGSL_IOCWAITGPIO:
	case MGSL_IOCSTXIDLE:
	case MGSL_IOCTXENABLE:
	case MGSL_IOCRXENABLE:
	case MGSL_IOCTXABORT:
	case TIOCMIWAIT:
	case MGSL_IOCSIF:
		rc = ioctl(tty, file, cmd, arg);
		break;
	}

	DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
	return rc;
}
#else
#define slgt_compat_ioctl NULL
#endif /* ifdef CONFIG_COMPAT */

/*
 * proc fs support
 */
static inline void line_info(struct seq_file *m, struct slgt_info *info)
{
	char stat_buf[30];
	unsigned long flags;

	seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
		   info->device_name, info->phys_reg_addr,
		   info->irq_level, info->max_frame_size);

	/* output current serial signal states */
	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	if (info->params.mode != MGSL_MODE_ASYNC) {
		seq_printf(m, "\tHDLC txok:%d rxok:%d",
			   info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			seq_printf(m, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			seq_printf(m, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			seq_printf(m, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			seq_printf(m, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			seq_printf(m, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
	} else {
		seq_printf(m, "\tASYNC tx:%d rx:%d",
			   info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			seq_printf(m, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			seq_printf(m, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			seq_printf(m, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			seq_printf(m, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end */
	seq_printf(m, " %s\n", stat_buf+1);

	seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
		   info->tx_active, info->bh_requested, info->bh_running,
		   info->pending_bh);
}

/* Called to print information about devices
 */
static int synclink_gt_proc_show(struct seq_file *m, void *v)
{
	struct slgt_info *info;

	seq_puts(m, "synclink_gt driver\n");

	info = slgt_device_list;
	while( info ) {
		line_info(m, info);
		info = info->next_device;
	}
	return 0;
}

static int synclink_gt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, synclink_gt_proc_show, NULL);
}

static const struct file_operations synclink_gt_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = synclink_gt_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
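
/*
 * Illustrative note (assumed path, not part of the original source):
 * the proc operations above are registered through the tty driver, so
 * per-port status can typically be read with
 *
 *   cat /proc/tty/driver/synclink_gt
 */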
1328
Paul Fulghum705b6c72006-01-08 01:02:06 -08001329/*
1330 * return count of bytes in transmit buffer
1331 */
1332static int chars_in_buffer(struct tty_struct *tty)
1333{
1334 struct slgt_info *info = tty->driver_data;
Paul Fulghum403214d2008-07-22 11:21:55 +01001335 int count;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001336 if (sanity_check(info, tty->name, "chars_in_buffer"))
1337 return 0;
Paul Fulghum403214d2008-07-22 11:21:55 +01001338 count = tbuf_bytes(info);
1339 DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
1340 return count;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001341}
1342
1343/*
1344 * signal remote device to throttle send data (our receive data)
1345 */
1346static void throttle(struct tty_struct * tty)
1347{
1348 struct slgt_info *info = tty->driver_data;
1349 unsigned long flags;
1350
1351 if (sanity_check(info, tty->name, "throttle"))
1352 return;
1353 DBGINFO(("%s throttle\n", info->device_name));
1354 if (I_IXOFF(tty))
1355 send_xchar(tty, STOP_CHAR(tty));
1356 if (tty->termios->c_cflag & CRTSCTS) {
1357 spin_lock_irqsave(&info->lock,flags);
1358 info->signals &= ~SerialSignal_RTS;
1359 set_signals(info);
1360 spin_unlock_irqrestore(&info->lock,flags);
1361 }
1362}
1363
1364/*
1365 * signal remote device to stop throttling send data (our receive data)
1366 */
1367static void unthrottle(struct tty_struct * tty)
1368{
1369 struct slgt_info *info = tty->driver_data;
1370 unsigned long flags;
1371
1372 if (sanity_check(info, tty->name, "unthrottle"))
1373 return;
1374 DBGINFO(("%s unthrottle\n", info->device_name));
1375 if (I_IXOFF(tty)) {
1376 if (info->x_char)
1377 info->x_char = 0;
1378 else
1379 send_xchar(tty, START_CHAR(tty));
1380 }
1381 if (tty->termios->c_cflag & CRTSCTS) {
1382 spin_lock_irqsave(&info->lock,flags);
1383 info->signals |= SerialSignal_RTS;
1384 set_signals(info);
1385 spin_unlock_irqrestore(&info->lock,flags);
1386 }
1387}
1388
1389/*
1390 * set or clear transmit break condition
1391 * break_state -1=set break condition, 0=clear
1392 */
Alan Cox9e989662008-07-22 11:18:03 +01001393static int set_break(struct tty_struct *tty, int break_state)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001394{
1395 struct slgt_info *info = tty->driver_data;
1396 unsigned short value;
1397 unsigned long flags;
1398
1399 if (sanity_check(info, tty->name, "set_break"))
Alan Cox9e989662008-07-22 11:18:03 +01001400 return -EINVAL;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001401 DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
1402
1403 spin_lock_irqsave(&info->lock,flags);
1404 value = rd_reg16(info, TCR);
1405 if (break_state == -1)
1406 value |= BIT6;
1407 else
1408 value &= ~BIT6;
1409 wr_reg16(info, TCR, value);
1410 spin_unlock_irqrestore(&info->lock,flags);
Alan Cox9e989662008-07-22 11:18:03 +01001411 return 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001412}
1413
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08001414#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08001415
1416/**
1417 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
1418 * set encoding and frame check sequence (FCS) options
1419 *
1420 * dev pointer to network device structure
1421 * encoding serial encoding setting
1422 * parity FCS setting
1423 *
1424 * returns 0 if success, otherwise error code
1425 */
1426static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
1427 unsigned short parity)
1428{
1429 struct slgt_info *info = dev_to_port(dev);
1430 unsigned char new_encoding;
1431 unsigned short new_crctype;
1432
1433 /* return error if TTY interface open */
Alan Cox8fb06c72008-07-16 21:56:46 +01001434 if (info->port.count)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001435 return -EBUSY;
1436
1437 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
1438
1439 switch (encoding)
1440 {
1441 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
1442 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
1443 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
1444 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
1445 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
1446 default: return -EINVAL;
1447 }
1448
1449 switch (parity)
1450 {
1451 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
1452 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
1453 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
1454 default: return -EINVAL;
1455 }
1456
1457 info->params.encoding = new_encoding;
Alexey Dobriyan53b35312006-03-24 03:16:13 -08001458 info->params.crc_type = new_crctype;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001459
1460 /* if network interface up, reprogram hardware */
1461 if (info->netcount)
1462 program_hw(info);
1463
1464 return 0;
1465}
1466
1467/**
1468 * called by generic HDLC layer to send frame
1469 *
1470 * skb socket buffer containing HDLC frame
1471 * dev pointer to network device structure
Paul Fulghum705b6c72006-01-08 01:02:06 -08001472 */
Stephen Hemminger4c5d5022009-08-31 19:50:48 +00001473static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
1474 struct net_device *dev)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001475{
1476 struct slgt_info *info = dev_to_port(dev);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001477 unsigned long flags;
1478
1479 DBGINFO(("%s hdlc_xmit\n", dev->name));
1480
Paul Fulghumde538eb2009-12-09 12:31:39 -08001481 if (!skb->len)
1482 return NETDEV_TX_OK;
1483
Paul Fulghum705b6c72006-01-08 01:02:06 -08001484 /* stop sending until this frame completes */
1485 netif_stop_queue(dev);
1486
Paul Fulghum705b6c72006-01-08 01:02:06 -08001487 /* update network statistics */
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001488 dev->stats.tx_packets++;
1489 dev->stats.tx_bytes += skb->len;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001490
Paul Fulghum705b6c72006-01-08 01:02:06 -08001491 /* save start time for transmit timeout detection */
1492 dev->trans_start = jiffies;
1493
Paul Fulghumde538eb2009-12-09 12:31:39 -08001494 spin_lock_irqsave(&info->lock, flags);
1495 tx_load(info, skb->data, skb->len);
1496 spin_unlock_irqrestore(&info->lock, flags);
1497
1498 /* done with socket buffer, so free it */
1499 dev_kfree_skb(skb);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001500
Stephen Hemminger4c5d5022009-08-31 19:50:48 +00001501 return NETDEV_TX_OK;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001502}
1503
1504/**
1505 * called by network layer when interface is enabled
1506 * claim resources and initialize hardware
1507 *
1508 * dev pointer to network device structure
1509 *
1510 * returns 0 if success, otherwise error code
1511 */
1512static int hdlcdev_open(struct net_device *dev)
1513{
1514 struct slgt_info *info = dev_to_port(dev);
1515 int rc;
1516 unsigned long flags;
1517
Paul Fulghumd4c63b72007-08-22 14:01:50 -07001518 if (!try_module_get(THIS_MODULE))
1519 return -EBUSY;
1520
Paul Fulghum705b6c72006-01-08 01:02:06 -08001521 DBGINFO(("%s hdlcdev_open\n", dev->name));
1522
1523 /* generic HDLC layer open processing */
1524 if ((rc = hdlc_open(dev)))
1525 return rc;
1526
1527 /* arbitrate between network and tty opens */
1528 spin_lock_irqsave(&info->netlock, flags);
Alan Cox8fb06c72008-07-16 21:56:46 +01001529 if (info->port.count != 0 || info->netcount != 0) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08001530 DBGINFO(("%s hdlc_open busy\n", dev->name));
1531 spin_unlock_irqrestore(&info->netlock, flags);
1532 return -EBUSY;
1533 }
1534 info->netcount=1;
1535 spin_unlock_irqrestore(&info->netlock, flags);
1536
1537 /* claim resources and init adapter */
1538 if ((rc = startup(info)) != 0) {
1539 spin_lock_irqsave(&info->netlock, flags);
1540 info->netcount=0;
1541 spin_unlock_irqrestore(&info->netlock, flags);
1542 return rc;
1543 }
1544
1545 /* assert DTR and RTS, apply hardware settings */
1546 info->signals |= SerialSignal_RTS + SerialSignal_DTR;
1547 program_hw(info);
1548
1549 /* enable network layer transmit */
1550 dev->trans_start = jiffies;
1551 netif_start_queue(dev);
1552
1553 /* inform generic HDLC layer of current DCD status */
1554 spin_lock_irqsave(&info->lock, flags);
1555 get_signals(info);
1556 spin_unlock_irqrestore(&info->lock, flags);
Krzysztof Halasafbeff3c2006-07-21 14:44:55 -07001557 if (info->signals & SerialSignal_DCD)
1558 netif_carrier_on(dev);
1559 else
1560 netif_carrier_off(dev);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001561 return 0;
1562}
1563
1564/**
1565 * called by network layer when interface is disabled
1566 * shutdown hardware and release resources
1567 *
1568 * dev pointer to network device structure
1569 *
1570 * returns 0 if success, otherwise error code
1571 */
1572static int hdlcdev_close(struct net_device *dev)
1573{
1574 struct slgt_info *info = dev_to_port(dev);
1575 unsigned long flags;
1576
1577 DBGINFO(("%s hdlcdev_close\n", dev->name));
1578
1579 netif_stop_queue(dev);
1580
1581 /* shutdown adapter and release resources */
1582 shutdown(info);
1583
1584 hdlc_close(dev);
1585
1586 spin_lock_irqsave(&info->netlock, flags);
1587 info->netcount=0;
1588 spin_unlock_irqrestore(&info->netlock, flags);
1589
Paul Fulghumd4c63b72007-08-22 14:01:50 -07001590 module_put(THIS_MODULE);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001591 return 0;
1592}
1593
1594/**
1595 * called by network layer to process IOCTL call to network device
1596 *
1597 * dev pointer to network device structure
1598 * ifr pointer to network interface request structure
1599 * cmd IOCTL command code
1600 *
1601 * returns 0 if success, otherwise error code
1602 */
1603static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1604{
1605 const size_t size = sizeof(sync_serial_settings);
1606 sync_serial_settings new_line;
1607 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
1608 struct slgt_info *info = dev_to_port(dev);
1609 unsigned int flags;
1610
1611 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
1612
1613 /* return error if TTY interface open */
Alan Cox8fb06c72008-07-16 21:56:46 +01001614 if (info->port.count)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001615 return -EBUSY;
1616
1617 if (cmd != SIOCWANDEV)
1618 return hdlc_ioctl(dev, ifr, cmd);
1619
1620 switch(ifr->ifr_settings.type) {
1621 case IF_GET_IFACE: /* return current sync_serial_settings */
1622
1623 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
1624 if (ifr->ifr_settings.size < size) {
1625 ifr->ifr_settings.size = size; /* data size wanted */
1626 return -ENOBUFS;
1627 }
1628
1629 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1630 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
1631 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1632 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
1633
1634 switch (flags){
1635 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
1636 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
1637 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
1638 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
1639 default: new_line.clock_type = CLOCK_DEFAULT;
1640 }
1641
1642 new_line.clock_rate = info->params.clock_speed;
1643 new_line.loopback = info->params.loopback ? 1:0;
1644
1645 if (copy_to_user(line, &new_line, size))
1646 return -EFAULT;
1647 return 0;
1648
1649 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
1650
1651 if(!capable(CAP_NET_ADMIN))
1652 return -EPERM;
1653 if (copy_from_user(&new_line, line, size))
1654 return -EFAULT;
1655
1656 switch (new_line.clock_type)
1657 {
1658 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
1659 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
1660 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
1661 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
1662 case CLOCK_DEFAULT: flags = info->params.flags &
1663 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1664 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
1665 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1666 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
1667 default: return -EINVAL;
1668 }
1669
1670 if (new_line.loopback != 0 && new_line.loopback != 1)
1671 return -EINVAL;
1672
1673 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1674 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
1675 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1676 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
1677 info->params.flags |= flags;
1678
1679 info->params.loopback = new_line.loopback;
1680
1681 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
1682 info->params.clock_speed = new_line.clock_rate;
1683 else
1684 info->params.clock_speed = 0;
1685
1686 /* if network interface up, reprogram hardware */
1687 if (info->netcount)
1688 program_hw(info);
1689 return 0;
1690
1691 default:
1692 return hdlc_ioctl(dev, ifr, cmd);
1693 }
1694}
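/*
 * Illustrative user-space sketch (not part of the driver, excluded from the
 * build): how a configuration tool can set the clock source and rate that
 * hdlcdev_ioctl() above handles for IF_IFACE_SYNC_SERIAL. The interface name
 * "hdlc0" and the exact header paths are assumptions and may vary by libc
 * and kernel version.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/hdlc/ioctl.h>
#include <linux/sockios.h>

static int set_internal_clock(int sock, const char *ifname, unsigned int rate)
{
	struct ifreq ifr;
	sync_serial_settings line;

	memset(&ifr, 0, sizeof(ifr));
	memset(&line, 0, sizeof(line));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	line.clock_type = CLOCK_INT;	/* BRG supplies rx and tx clocks */
	line.clock_rate = rate;
	line.loopback = 0;

	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
	ifr.ifr_settings.size = sizeof(line);
	ifr.ifr_settings.ifs_ifsu.sync = &line;

	return ioctl(sock, SIOCWANDEV, &ifr);	/* lands in hdlcdev_ioctl() */
}
/* usage: set_internal_clock(any_udp_socket_fd, "hdlc0", 64000); */
#endif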
1695
1696/**
1697 * called by network layer when transmit timeout is detected
1698 *
1699 * dev pointer to network device structure
1700 */
1701static void hdlcdev_tx_timeout(struct net_device *dev)
1702{
1703 struct slgt_info *info = dev_to_port(dev);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001704 unsigned long flags;
1705
1706 DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
1707
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001708 dev->stats.tx_errors++;
1709 dev->stats.tx_aborted_errors++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001710
1711 spin_lock_irqsave(&info->lock,flags);
1712 tx_stop(info);
1713 spin_unlock_irqrestore(&info->lock,flags);
1714
1715 netif_wake_queue(dev);
1716}
1717
1718/**
1719 * called by device driver when transmit completes
1720 * reenable network layer transmit if stopped
1721 *
1722 * info pointer to device instance information
1723 */
1724static void hdlcdev_tx_done(struct slgt_info *info)
1725{
1726 if (netif_queue_stopped(info->netdev))
1727 netif_wake_queue(info->netdev);
1728}
1729
1730/**
1731 * called by device driver when frame received
1732 * pass frame to network layer
1733 *
1734 * info pointer to device instance information
1735 * buf pointer to buffer containing frame data
1736 * size count of data bytes in buf
1737 */
1738static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
1739{
1740 struct sk_buff *skb = dev_alloc_skb(size);
1741 struct net_device *dev = info->netdev;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001742
1743 DBGINFO(("%s hdlcdev_rx\n", dev->name));
1744
1745 if (skb == NULL) {
1746 DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001747 dev->stats.rx_dropped++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001748 return;
1749 }
1750
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001751 memcpy(skb_put(skb, size), buf, size);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001752
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001753 skb->protocol = hdlc_type_trans(skb, dev);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001754
Krzysztof Halasa198191c2008-06-30 23:26:53 +02001755 dev->stats.rx_packets++;
1756 dev->stats.rx_bytes += size;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001757
1758 netif_rx(skb);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001759}
1760
Krzysztof Hałasa991990a2009-01-08 22:52:11 +01001761static const struct net_device_ops hdlcdev_ops = {
1762 .ndo_open = hdlcdev_open,
1763 .ndo_stop = hdlcdev_close,
1764 .ndo_change_mtu = hdlc_change_mtu,
1765 .ndo_start_xmit = hdlc_start_xmit,
1766 .ndo_do_ioctl = hdlcdev_ioctl,
1767 .ndo_tx_timeout = hdlcdev_tx_timeout,
1768};
1769
Paul Fulghum705b6c72006-01-08 01:02:06 -08001770/**
1771 * called by device driver when adding device instance
1772 * do generic HDLC initialization
1773 *
1774 * info pointer to device instance information
1775 *
1776 * returns 0 if success, otherwise error code
1777 */
1778static int hdlcdev_init(struct slgt_info *info)
1779{
1780 int rc;
1781 struct net_device *dev;
1782 hdlc_device *hdlc;
1783
1784 /* allocate and initialize network and HDLC layer objects */
1785
1786 if (!(dev = alloc_hdlcdev(info))) {
1787 printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
1788 return -ENOMEM;
1789 }
1790
1791 /* for network layer reporting purposes only */
1792 dev->mem_start = info->phys_reg_addr;
1793 dev->mem_end = info->phys_reg_addr + SLGT_REG_SIZE - 1;
1794 dev->irq = info->irq_level;
1795
1796 /* network layer callbacks and settings */
Krzysztof Hałasa991990a2009-01-08 22:52:11 +01001797 dev->netdev_ops = &hdlcdev_ops;
1798 dev->watchdog_timeo = 10 * HZ;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001799 dev->tx_queue_len = 50;
1800
1801 /* generic HDLC layer callbacks and settings */
1802 hdlc = dev_to_hdlc(dev);
1803 hdlc->attach = hdlcdev_attach;
1804 hdlc->xmit = hdlcdev_xmit;
1805
1806 /* register objects with HDLC layer */
1807 if ((rc = register_hdlc_device(dev))) {
1808		printk(KERN_WARNING "%s: unable to register hdlc device\n", __FILE__);
1809 free_netdev(dev);
1810 return rc;
1811 }
1812
1813 info->netdev = dev;
1814 return 0;
1815}
1816
1817/**
1818 * called by device driver when removing device instance
1819 * do generic HDLC cleanup
1820 *
1821 * info pointer to device instance information
1822 */
1823static void hdlcdev_exit(struct slgt_info *info)
1824{
1825 unregister_hdlc_device(info->netdev);
1826 free_netdev(info->netdev);
1827 info->netdev = NULL;
1828}
1829
1830#endif /* if SYNCLINK_GENERIC_HDLC */
1831
1832/*
1833 * get async data from rx DMA buffers
1834 */
1835static void rx_async(struct slgt_info *info)
1836{
Alan Cox8fb06c72008-07-16 21:56:46 +01001837 struct tty_struct *tty = info->port.tty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001838 struct mgsl_icount *icount = &info->icount;
1839 unsigned int start, end;
1840 unsigned char *p;
1841 unsigned char status;
1842 struct slgt_desc *bufs = info->rbufs;
1843 int i, count;
Alan Cox33f0f882006-01-09 20:54:13 -08001844 int chars = 0;
1845 int stat;
1846 unsigned char ch;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001847
1848 start = end = info->rbuf_current;
1849
1850 while(desc_complete(bufs[end])) {
1851 count = desc_count(bufs[end]) - info->rbuf_index;
1852 p = bufs[end].buf + info->rbuf_index;
1853
1854 DBGISR(("%s rx_async count=%d\n", info->device_name, count));
1855 DBGDATA(info, p, count, "rx");
1856
1857 for(i=0 ; i < count; i+=2, p+=2) {
Alan Cox33f0f882006-01-09 20:54:13 -08001858 ch = *p;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001859 icount->rx++;
1860
Alan Cox33f0f882006-01-09 20:54:13 -08001861 stat = 0;
1862
Paul Fulghum202af6d2006-08-31 21:27:36 -07001863 if ((status = *(p+1) & (BIT1 + BIT0))) {
1864 if (status & BIT1)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001865 icount->parity++;
Paul Fulghum202af6d2006-08-31 21:27:36 -07001866 else if (status & BIT0)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001867 icount->frame++;
1868 /* discard char if tty control flags say so */
1869 if (status & info->ignore_status_mask)
1870 continue;
Paul Fulghum202af6d2006-08-31 21:27:36 -07001871 if (status & BIT1)
Alan Cox33f0f882006-01-09 20:54:13 -08001872 stat = TTY_PARITY;
Paul Fulghum202af6d2006-08-31 21:27:36 -07001873 else if (status & BIT0)
Alan Cox33f0f882006-01-09 20:54:13 -08001874 stat = TTY_FRAME;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001875 }
1876 if (tty) {
Alan Cox33f0f882006-01-09 20:54:13 -08001877 tty_insert_flip_char(tty, ch, stat);
1878 chars++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001879 }
1880 }
1881
1882 if (i < count) {
1883 /* receive buffer not completed */
1884 info->rbuf_index += i;
Jiri Slaby40565f12007-02-12 00:52:31 -08001885 mod_timer(&info->rx_timer, jiffies + 1);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001886 break;
1887 }
1888
1889 info->rbuf_index = 0;
1890 free_rbufs(info, end, end);
1891
1892 if (++end == info->rbuf_count)
1893 end = 0;
1894
1895 /* if entire list searched then no frame available */
1896 if (end == start)
1897 break;
1898 }
1899
Alan Cox33f0f882006-01-09 20:54:13 -08001900 if (tty && chars)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001901 tty_flip_buffer_push(tty);
1902}
1903
1904/*
1905 * return next bottom half action to perform
1906 */
1907static int bh_action(struct slgt_info *info)
1908{
1909 unsigned long flags;
1910 int rc;
1911
1912 spin_lock_irqsave(&info->lock,flags);
1913
1914 if (info->pending_bh & BH_RECEIVE) {
1915 info->pending_bh &= ~BH_RECEIVE;
1916 rc = BH_RECEIVE;
1917 } else if (info->pending_bh & BH_TRANSMIT) {
1918 info->pending_bh &= ~BH_TRANSMIT;
1919 rc = BH_TRANSMIT;
1920 } else if (info->pending_bh & BH_STATUS) {
1921 info->pending_bh &= ~BH_STATUS;
1922 rc = BH_STATUS;
1923 } else {
1924 /* Mark BH routine as complete */
Joe Perches0fab6de2008-04-28 02:14:02 -07001925 info->bh_running = false;
1926 info->bh_requested = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001927 rc = 0;
1928 }
1929
1930 spin_unlock_irqrestore(&info->lock,flags);
1931
1932 return rc;
1933}
1934
1935/*
1936 * perform bottom half processing
1937 */
David Howellsc4028952006-11-22 14:57:56 +00001938static void bh_handler(struct work_struct *work)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001939{
David Howellsc4028952006-11-22 14:57:56 +00001940 struct slgt_info *info = container_of(work, struct slgt_info, task);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001941 int action;
1942
1943 if (!info)
1944 return;
Joe Perches0fab6de2008-04-28 02:14:02 -07001945 info->bh_running = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001946
1947 while((action = bh_action(info))) {
1948 switch (action) {
1949 case BH_RECEIVE:
1950 DBGBH(("%s bh receive\n", info->device_name));
1951 switch(info->params.mode) {
1952 case MGSL_MODE_ASYNC:
1953 rx_async(info);
1954 break;
1955 case MGSL_MODE_HDLC:
1956 while(rx_get_frame(info));
1957 break;
1958 case MGSL_MODE_RAW:
Paul Fulghumcb10dc92006-09-30 23:27:45 -07001959 case MGSL_MODE_MONOSYNC:
1960 case MGSL_MODE_BISYNC:
Paul Fulghum705b6c72006-01-08 01:02:06 -08001961 while(rx_get_buf(info));
1962 break;
1963 }
1964 /* restart receiver if rx DMA buffers exhausted */
1965 if (info->rx_restart)
1966 rx_start(info);
1967 break;
1968 case BH_TRANSMIT:
1969 bh_transmit(info);
1970 break;
1971 case BH_STATUS:
1972 DBGBH(("%s bh status\n", info->device_name));
1973 info->ri_chkcount = 0;
1974 info->dsr_chkcount = 0;
1975 info->dcd_chkcount = 0;
1976 info->cts_chkcount = 0;
1977 break;
1978 default:
1979 DBGBH(("%s unknown action\n", info->device_name));
1980 break;
1981 }
1982 }
1983 DBGBH(("%s bh_handler exit\n", info->device_name));
1984}
1985
1986static void bh_transmit(struct slgt_info *info)
1987{
Alan Cox8fb06c72008-07-16 21:56:46 +01001988 struct tty_struct *tty = info->port.tty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08001989
1990 DBGBH(("%s bh_transmit\n", info->device_name));
Jiri Slabyb963a842007-02-10 01:44:55 -08001991 if (tty)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001992 tty_wakeup(tty);
Paul Fulghum705b6c72006-01-08 01:02:06 -08001993}
1994
Paul Fulghumed8485f2008-02-06 01:37:18 -08001995static void dsr_change(struct slgt_info *info, unsigned short status)
Paul Fulghum705b6c72006-01-08 01:02:06 -08001996{
Paul Fulghumed8485f2008-02-06 01:37:18 -08001997 if (status & BIT3) {
1998 info->signals |= SerialSignal_DSR;
1999 info->input_signal_events.dsr_up++;
2000 } else {
2001 info->signals &= ~SerialSignal_DSR;
2002 info->input_signal_events.dsr_down++;
2003 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002004 DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
2005 if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2006 slgt_irq_off(info, IRQ_DSR);
2007 return;
2008 }
2009 info->icount.dsr++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002010 wake_up_interruptible(&info->status_event_wait_q);
2011 wake_up_interruptible(&info->event_wait_q);
2012 info->pending_bh |= BH_STATUS;
2013}
2014
Paul Fulghumed8485f2008-02-06 01:37:18 -08002015static void cts_change(struct slgt_info *info, unsigned short status)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002016{
Paul Fulghumed8485f2008-02-06 01:37:18 -08002017 if (status & BIT2) {
2018 info->signals |= SerialSignal_CTS;
2019 info->input_signal_events.cts_up++;
2020 } else {
2021 info->signals &= ~SerialSignal_CTS;
2022 info->input_signal_events.cts_down++;
2023 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002024 DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
2025 if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2026 slgt_irq_off(info, IRQ_CTS);
2027 return;
2028 }
2029 info->icount.cts++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002030 wake_up_interruptible(&info->status_event_wait_q);
2031 wake_up_interruptible(&info->event_wait_q);
2032 info->pending_bh |= BH_STATUS;
2033
Alan Cox8fb06c72008-07-16 21:56:46 +01002034 if (info->port.flags & ASYNC_CTS_FLOW) {
2035 if (info->port.tty) {
2036 if (info->port.tty->hw_stopped) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002037 if (info->signals & SerialSignal_CTS) {
Alan Cox8fb06c72008-07-16 21:56:46 +01002038 info->port.tty->hw_stopped = 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002039 info->pending_bh |= BH_TRANSMIT;
2040 return;
2041 }
2042 } else {
2043 if (!(info->signals & SerialSignal_CTS))
Alan Cox8fb06c72008-07-16 21:56:46 +01002044 info->port.tty->hw_stopped = 1;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002045 }
2046 }
2047 }
2048}
2049
Paul Fulghumed8485f2008-02-06 01:37:18 -08002050static void dcd_change(struct slgt_info *info, unsigned short status)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002051{
Paul Fulghumed8485f2008-02-06 01:37:18 -08002052 if (status & BIT1) {
2053 info->signals |= SerialSignal_DCD;
2054 info->input_signal_events.dcd_up++;
2055 } else {
2056 info->signals &= ~SerialSignal_DCD;
2057 info->input_signal_events.dcd_down++;
2058 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002059 DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
2060 if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2061 slgt_irq_off(info, IRQ_DCD);
2062 return;
2063 }
2064 info->icount.dcd++;
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08002065#if SYNCLINK_GENERIC_HDLC
Krzysztof Halasafbeff3c2006-07-21 14:44:55 -07002066 if (info->netcount) {
2067 if (info->signals & SerialSignal_DCD)
2068 netif_carrier_on(info->netdev);
2069 else
2070 netif_carrier_off(info->netdev);
2071 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002072#endif
2073 wake_up_interruptible(&info->status_event_wait_q);
2074 wake_up_interruptible(&info->event_wait_q);
2075 info->pending_bh |= BH_STATUS;
2076
Alan Cox8fb06c72008-07-16 21:56:46 +01002077 if (info->port.flags & ASYNC_CHECK_CD) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002078 if (info->signals & SerialSignal_DCD)
Alan Cox8fb06c72008-07-16 21:56:46 +01002079 wake_up_interruptible(&info->port.open_wait);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002080 else {
Alan Cox8fb06c72008-07-16 21:56:46 +01002081 if (info->port.tty)
2082 tty_hangup(info->port.tty);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002083 }
2084 }
2085}
2086
Paul Fulghumed8485f2008-02-06 01:37:18 -08002087static void ri_change(struct slgt_info *info, unsigned short status)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002088{
Paul Fulghumed8485f2008-02-06 01:37:18 -08002089 if (status & BIT0) {
2090 info->signals |= SerialSignal_RI;
2091 info->input_signal_events.ri_up++;
2092 } else {
2093 info->signals &= ~SerialSignal_RI;
2094 info->input_signal_events.ri_down++;
2095 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002096 DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
2097 if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2098 slgt_irq_off(info, IRQ_RI);
2099 return;
2100 }
Paul Fulghumed8485f2008-02-06 01:37:18 -08002101 info->icount.rng++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002102 wake_up_interruptible(&info->status_event_wait_q);
2103 wake_up_interruptible(&info->event_wait_q);
2104 info->pending_bh |= BH_STATUS;
2105}
2106
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01002107static void isr_rxdata(struct slgt_info *info)
2108{
2109 unsigned int count = info->rbuf_fill_count;
2110 unsigned int i = info->rbuf_fill_index;
2111 unsigned short reg;
2112
2113 while (rd_reg16(info, SSR) & IRQ_RXDATA) {
2114 reg = rd_reg16(info, RDR);
2115 DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
2116 if (desc_complete(info->rbufs[i])) {
2117 /* all buffers full */
2118 rx_stop(info);
2119 info->rx_restart = 1;
2120 continue;
2121 }
2122 info->rbufs[i].buf[count++] = (unsigned char)reg;
2123 /* async mode saves status byte to buffer for each data byte */
2124 if (info->params.mode == MGSL_MODE_ASYNC)
2125 info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
2126 if (count == info->rbuf_fill_level || (reg & BIT10)) {
2127 /* buffer full or end of frame */
2128 set_desc_count(info->rbufs[i], count);
2129 set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
2130 info->rbuf_fill_count = count = 0;
2131 if (++i == info->rbuf_count)
2132 i = 0;
2133 info->pending_bh |= BH_RECEIVE;
2134 }
2135 }
2136
2137 info->rbuf_fill_index = i;
2138 info->rbuf_fill_count = count;
2139}
2140
Paul Fulghum705b6c72006-01-08 01:02:06 -08002141static void isr_serial(struct slgt_info *info)
2142{
2143 unsigned short status = rd_reg16(info, SSR);
2144
2145 DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
2146
2147 wr_reg16(info, SSR, status); /* clear pending */
2148
Joe Perches0fab6de2008-04-28 02:14:02 -07002149 info->irq_occurred = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002150
2151 if (info->params.mode == MGSL_MODE_ASYNC) {
2152 if (status & IRQ_TXIDLE) {
Paul Fulghumde538eb2009-12-09 12:31:39 -08002153 if (info->tx_active)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002154 isr_txeom(info, status);
2155 }
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01002156 if (info->rx_pio && (status & IRQ_RXDATA))
2157 isr_rxdata(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002158 if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
2159 info->icount.brk++;
2160 /* process break detection if tty control allows */
Alan Cox8fb06c72008-07-16 21:56:46 +01002161 if (info->port.tty) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002162 if (!(status & info->ignore_status_mask)) {
2163 if (info->read_status_mask & MASK_BREAK) {
Alan Cox8fb06c72008-07-16 21:56:46 +01002164 tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
2165 if (info->port.flags & ASYNC_SAK)
2166 do_SAK(info->port.tty);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002167 }
2168 }
2169 }
2170 }
2171 } else {
2172 if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
2173 isr_txeom(info, status);
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01002174 if (info->rx_pio && (status & IRQ_RXDATA))
2175 isr_rxdata(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002176 if (status & IRQ_RXIDLE) {
2177 if (status & RXIDLE)
2178 info->icount.rxidle++;
2179 else
2180 info->icount.exithunt++;
2181 wake_up_interruptible(&info->event_wait_q);
2182 }
2183
2184 if (status & IRQ_RXOVER)
2185 rx_start(info);
2186 }
2187
2188 if (status & IRQ_DSR)
Paul Fulghumed8485f2008-02-06 01:37:18 -08002189 dsr_change(info, status);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002190 if (status & IRQ_CTS)
Paul Fulghumed8485f2008-02-06 01:37:18 -08002191 cts_change(info, status);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002192 if (status & IRQ_DCD)
Paul Fulghumed8485f2008-02-06 01:37:18 -08002193 dcd_change(info, status);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002194 if (status & IRQ_RI)
Paul Fulghumed8485f2008-02-06 01:37:18 -08002195 ri_change(info, status);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002196}
2197
2198static void isr_rdma(struct slgt_info *info)
2199{
2200 unsigned int status = rd_reg32(info, RDCSR);
2201
2202 DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
2203
2204 /* RDCSR (rx DMA control/status)
2205 *
2206 * 31..07 reserved
2207 * 06 save status byte to DMA buffer
2208 * 05 error
2209 * 04 eol (end of list)
2210 * 03 eob (end of buffer)
2211 * 02 IRQ enable
2212 * 01 reset
2213 * 00 enable
2214 */
2215 wr_reg32(info, RDCSR, status); /* clear pending */
2216
2217 if (status & (BIT5 + BIT4)) {
2218 DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
Joe Perches0fab6de2008-04-28 02:14:02 -07002219 info->rx_restart = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002220 }
2221 info->pending_bh |= BH_RECEIVE;
2222}
2223
2224static void isr_tdma(struct slgt_info *info)
2225{
2226 unsigned int status = rd_reg32(info, TDCSR);
2227
2228 DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
2229
2230 /* TDCSR (tx DMA control/status)
2231 *
2232 * 31..06 reserved
2233 * 05 error
2234 * 04 eol (end of list)
2235 * 03 eob (end of buffer)
2236 * 02 IRQ enable
2237 * 01 reset
2238 * 00 enable
2239 */
2240 wr_reg32(info, TDCSR, status); /* clear pending */
2241
2242 if (status & (BIT5 + BIT4 + BIT3)) {
2243 // another transmit buffer has completed
2244 // run bottom half to get more send data from user
2245 info->pending_bh |= BH_TRANSMIT;
2246 }
2247}
2248
Paul Fulghumde538eb2009-12-09 12:31:39 -08002249/*
2250 * return true if there are unsent tx DMA buffers, otherwise false
2251 *
2252 * if there are unsent buffers then info->tbuf_start
2253 * is set to index of first unsent buffer
2254 */
2255static bool unsent_tbufs(struct slgt_info *info)
2256{
2257 unsigned int i = info->tbuf_current;
2258 bool rc = false;
2259
2260 /*
2261 * search backwards from last loaded buffer (precedes tbuf_current)
2262 * for first unsent buffer (desc_count > 0)
2263 */
2264
2265 do {
2266 if (i)
2267 i--;
2268 else
2269 i = info->tbuf_count - 1;
2270 if (!desc_count(info->tbufs[i]))
2271 break;
2272 info->tbuf_start = i;
2273 rc = true;
2274 } while (i != info->tbuf_current);
2275
2276 return rc;
2277}
2278
Paul Fulghum705b6c72006-01-08 01:02:06 -08002279static void isr_txeom(struct slgt_info *info, unsigned short status)
2280{
2281 DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2282
2283 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2284 tdma_reset(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002285 if (status & IRQ_TXUNDER) {
2286 unsigned short val = rd_reg16(info, TCR);
2287 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
2288 wr_reg16(info, TCR, val); /* clear reset bit */
2289 }
2290
2291 if (info->tx_active) {
2292 if (info->params.mode != MGSL_MODE_ASYNC) {
2293 if (status & IRQ_TXUNDER)
2294 info->icount.txunder++;
2295 else if (status & IRQ_TXIDLE)
2296 info->icount.txok++;
2297 }
2298
Paul Fulghumde538eb2009-12-09 12:31:39 -08002299 if (unsent_tbufs(info)) {
2300 tx_start(info);
2301 update_tx_timer(info);
2302 return;
2303 }
Joe Perches0fab6de2008-04-28 02:14:02 -07002304 info->tx_active = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002305
2306 del_timer(&info->tx_timer);
2307
2308 if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
2309 info->signals &= ~SerialSignal_RTS;
Joe Perches0fab6de2008-04-28 02:14:02 -07002310 info->drop_rts_on_tx_done = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002311 set_signals(info);
2312 }
2313
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08002314#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08002315 if (info->netcount)
2316 hdlcdev_tx_done(info);
2317 else
2318#endif
2319 {
Alan Cox8fb06c72008-07-16 21:56:46 +01002320 if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002321 tx_stop(info);
2322 return;
2323 }
2324 info->pending_bh |= BH_TRANSMIT;
2325 }
2326 }
2327}
2328
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002329static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
2330{
2331 struct cond_wait *w, *prev;
2332
2333 /* wake processes waiting for specific transitions */
2334 for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
2335 if (w->data & changed) {
2336 w->data = state;
2337 wake_up_interruptible(&w->q);
2338 if (prev != NULL)
2339 prev->next = w->next;
2340 else
2341 info->gpio_wait_q = w->next;
2342 } else
2343 prev = w;
2344 }
2345}
2346
Paul Fulghum705b6c72006-01-08 01:02:06 -08002347/* interrupt service routine
2348 *
2349 * irq interrupt number
2350 * dev_id device ID supplied during interrupt registration
Paul Fulghum705b6c72006-01-08 01:02:06 -08002351 */
Jeff Garzika6f97b22007-10-31 05:20:49 -04002352static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002353{
Jeff Garzika6f97b22007-10-31 05:20:49 -04002354 struct slgt_info *info = dev_id;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002355 unsigned int gsr;
2356 unsigned int i;
2357
Jeff Garzika6f97b22007-10-31 05:20:49 -04002358 DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
Paul Fulghum705b6c72006-01-08 01:02:06 -08002359
Paul Fulghum705b6c72006-01-08 01:02:06 -08002360 while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
2361 DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
Joe Perches0fab6de2008-04-28 02:14:02 -07002362 info->irq_occurred = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002363 for(i=0; i < info->port_count ; i++) {
2364 if (info->port_array[i] == NULL)
2365 continue;
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002366 spin_lock(&info->port_array[i]->lock);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002367 if (gsr & (BIT8 << i))
2368 isr_serial(info->port_array[i]);
2369 if (gsr & (BIT16 << (i*2)))
2370 isr_rdma(info->port_array[i]);
2371 if (gsr & (BIT17 << (i*2)))
2372 isr_tdma(info->port_array[i]);
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002373 spin_unlock(&info->port_array[i]->lock);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002374 }
2375 }
2376
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002377 if (info->gpio_present) {
2378 unsigned int state;
2379 unsigned int changed;
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002380 spin_lock(&info->lock);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002381 while ((changed = rd_reg32(info, IOSR)) != 0) {
2382 DBGISR(("%s iosr=%08x\n", info->device_name, changed));
2383 /* read latched state of GPIO signals */
2384 state = rd_reg32(info, IOVR);
2385 /* clear pending GPIO interrupt bits */
2386 wr_reg32(info, IOSR, changed);
2387 for (i=0 ; i < info->port_count ; i++) {
2388 if (info->port_array[i] != NULL)
2389 isr_gpio(info->port_array[i], changed, state);
2390 }
2391 }
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002392 spin_unlock(&info->lock);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002393 }
2394
Paul Fulghum705b6c72006-01-08 01:02:06 -08002395 for(i=0; i < info->port_count ; i++) {
2396 struct slgt_info *port = info->port_array[i];
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002397 if (port == NULL)
2398 continue;
2399 spin_lock(&port->lock);
2400 if ((port->port.count || port->netcount) &&
Paul Fulghum705b6c72006-01-08 01:02:06 -08002401 port->pending_bh && !port->bh_running &&
2402 !port->bh_requested) {
2403 DBGISR(("%s bh queued\n", port->device_name));
2404 schedule_work(&port->task);
Joe Perches0fab6de2008-04-28 02:14:02 -07002405 port->bh_requested = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002406 }
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002407 spin_unlock(&port->lock);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002408 }
2409
Jeff Garzika6f97b22007-10-31 05:20:49 -04002410 DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
Paul Fulghum705b6c72006-01-08 01:02:06 -08002411 return IRQ_HANDLED;
2412}
2413
2414static int startup(struct slgt_info *info)
2415{
2416 DBGINFO(("%s startup\n", info->device_name));
2417
Alan Cox8fb06c72008-07-16 21:56:46 +01002418 if (info->port.flags & ASYNC_INITIALIZED)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002419 return 0;
2420
2421 if (!info->tx_buf) {
2422 info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2423 if (!info->tx_buf) {
2424 DBGERR(("%s can't allocate tx buffer\n", info->device_name));
2425 return -ENOMEM;
2426 }
2427 }
2428
2429 info->pending_bh = 0;
2430
2431 memset(&info->icount, 0, sizeof(info->icount));
2432
2433 /* program hardware for current parameters */
2434 change_params(info);
2435
Alan Cox8fb06c72008-07-16 21:56:46 +01002436 if (info->port.tty)
2437 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002438
Alan Cox8fb06c72008-07-16 21:56:46 +01002439 info->port.flags |= ASYNC_INITIALIZED;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002440
2441 return 0;
2442}
2443
2444/*
2445 * called by close() and hangup() to shutdown hardware
2446 */
2447static void shutdown(struct slgt_info *info)
2448{
2449 unsigned long flags;
2450
Alan Cox8fb06c72008-07-16 21:56:46 +01002451 if (!(info->port.flags & ASYNC_INITIALIZED))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002452 return;
2453
2454 DBGINFO(("%s shutdown\n", info->device_name));
2455
2456 /* clear status wait queue because status changes */
2457 /* can't happen after shutting down the hardware */
2458 wake_up_interruptible(&info->status_event_wait_q);
2459 wake_up_interruptible(&info->event_wait_q);
2460
2461 del_timer_sync(&info->tx_timer);
2462 del_timer_sync(&info->rx_timer);
2463
2464 kfree(info->tx_buf);
2465 info->tx_buf = NULL;
2466
2467 spin_lock_irqsave(&info->lock,flags);
2468
2469 tx_stop(info);
2470 rx_stop(info);
2471
2472 slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
2473
Alan Cox8fb06c72008-07-16 21:56:46 +01002474 if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002475 info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
2476 set_signals(info);
2477 }
2478
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002479 flush_cond_wait(&info->gpio_wait_q);
2480
Paul Fulghum705b6c72006-01-08 01:02:06 -08002481 spin_unlock_irqrestore(&info->lock,flags);
2482
Alan Cox8fb06c72008-07-16 21:56:46 +01002483 if (info->port.tty)
2484 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002485
Alan Cox8fb06c72008-07-16 21:56:46 +01002486 info->port.flags &= ~ASYNC_INITIALIZED;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002487}
2488
2489static void program_hw(struct slgt_info *info)
2490{
2491 unsigned long flags;
2492
2493 spin_lock_irqsave(&info->lock,flags);
2494
2495 rx_stop(info);
2496 tx_stop(info);
2497
Paul Fulghumcb10dc92006-09-30 23:27:45 -07002498 if (info->params.mode != MGSL_MODE_ASYNC ||
Paul Fulghum705b6c72006-01-08 01:02:06 -08002499 info->netcount)
Paul Fulghumcb10dc92006-09-30 23:27:45 -07002500 sync_mode(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002501 else
2502 async_mode(info);
2503
2504 set_signals(info);
2505
2506 info->dcd_chkcount = 0;
2507 info->cts_chkcount = 0;
2508 info->ri_chkcount = 0;
2509 info->dsr_chkcount = 0;
2510
Paul Fulghuma6b2f872009-01-15 13:50:57 -08002511 slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002512 get_signals(info);
2513
2514 if (info->netcount ||
Alan Cox8fb06c72008-07-16 21:56:46 +01002515 (info->port.tty && info->port.tty->termios->c_cflag & CREAD))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002516 rx_start(info);
2517
2518 spin_unlock_irqrestore(&info->lock,flags);
2519}
2520
2521/*
2522 * reconfigure adapter based on new parameters
2523 */
2524static void change_params(struct slgt_info *info)
2525{
2526 unsigned cflag;
2527 int bits_per_char;
2528
Alan Cox8fb06c72008-07-16 21:56:46 +01002529 if (!info->port.tty || !info->port.tty->termios)
Paul Fulghum705b6c72006-01-08 01:02:06 -08002530 return;
2531 DBGINFO(("%s change_params\n", info->device_name));
2532
Alan Cox8fb06c72008-07-16 21:56:46 +01002533 cflag = info->port.tty->termios->c_cflag;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002534
2535 /* if B0 rate (hangup) specified then negate DTR and RTS */
2536 /* otherwise assert DTR and RTS */
2537 if (cflag & CBAUD)
2538 info->signals |= SerialSignal_RTS + SerialSignal_DTR;
2539 else
2540 info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
2541
2542 /* byte size and parity */
2543
2544 switch (cflag & CSIZE) {
2545 case CS5: info->params.data_bits = 5; break;
2546 case CS6: info->params.data_bits = 6; break;
2547 case CS7: info->params.data_bits = 7; break;
2548 case CS8: info->params.data_bits = 8; break;
2549 default: info->params.data_bits = 7; break;
2550 }
2551
2552 info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
2553
2554 if (cflag & PARENB)
2555 info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
2556 else
2557 info->params.parity = ASYNC_PARITY_NONE;
2558
2559 /* calculate number of jiffies to transmit a full
2560 * FIFO (32 bytes) at specified data rate
2561 */
2562 bits_per_char = info->params.data_bits +
2563 info->params.stop_bits + 1;
2564
Alan Cox8fb06c72008-07-16 21:56:46 +01002565 info->params.data_rate = tty_get_baud_rate(info->port.tty);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002566
2567 if (info->params.data_rate) {
2568 info->timeout = (32*HZ*bits_per_char) /
2569 info->params.data_rate;
2570 }
2571 info->timeout += HZ/50; /* Add .02 seconds of slop */
2572
2573 if (cflag & CRTSCTS)
Alan Cox8fb06c72008-07-16 21:56:46 +01002574 info->port.flags |= ASYNC_CTS_FLOW;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002575 else
Alan Cox8fb06c72008-07-16 21:56:46 +01002576 info->port.flags &= ~ASYNC_CTS_FLOW;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002577
2578 if (cflag & CLOCAL)
Alan Cox8fb06c72008-07-16 21:56:46 +01002579 info->port.flags &= ~ASYNC_CHECK_CD;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002580 else
Alan Cox8fb06c72008-07-16 21:56:46 +01002581 info->port.flags |= ASYNC_CHECK_CD;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002582
2583 /* process tty input control flags */
2584
2585 info->read_status_mask = IRQ_RXOVER;
Alan Cox8fb06c72008-07-16 21:56:46 +01002586 if (I_INPCK(info->port.tty))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002587 info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
Alan Cox8fb06c72008-07-16 21:56:46 +01002588 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002589 info->read_status_mask |= MASK_BREAK;
Alan Cox8fb06c72008-07-16 21:56:46 +01002590 if (I_IGNPAR(info->port.tty))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002591 info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
Alan Cox8fb06c72008-07-16 21:56:46 +01002592 if (I_IGNBRK(info->port.tty)) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08002593 info->ignore_status_mask |= MASK_BREAK;
2594 /* If ignoring parity and break indicators, ignore
2595 * overruns too. (For real raw support).
2596 */
Alan Cox8fb06c72008-07-16 21:56:46 +01002597 if (I_IGNPAR(info->port.tty))
Paul Fulghum705b6c72006-01-08 01:02:06 -08002598 info->ignore_status_mask |= MASK_OVERRUN;
2599 }
2600
2601 program_hw(info);
2602}
2603
2604static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
2605{
2606 DBGINFO(("%s get_stats\n", info->device_name));
2607 if (!user_icount) {
2608 memset(&info->icount, 0, sizeof(info->icount));
2609 } else {
2610 if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
2611 return -EFAULT;
2612 }
2613 return 0;
2614}
2615
2616static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
2617{
2618 DBGINFO(("%s get_params\n", info->device_name));
2619 if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
2620 return -EFAULT;
2621 return 0;
2622}
2623
2624static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
2625{
2626 unsigned long flags;
2627 MGSL_PARAMS tmp_params;
2628
2629 DBGINFO(("%s set_params\n", info->device_name));
2630 if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
2631 return -EFAULT;
2632
2633 spin_lock_irqsave(&info->lock, flags);
Paul Fulghum1f807692009-04-02 16:58:30 -07002634 if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
2635 info->base_clock = tmp_params.clock_speed;
2636 else
2637 memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
Paul Fulghum705b6c72006-01-08 01:02:06 -08002638 spin_unlock_irqrestore(&info->lock, flags);
2639
Paul Fulghum1f807692009-04-02 16:58:30 -07002640 program_hw(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002641
2642 return 0;
2643}
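/*
 * Illustrative user-space sketch (not part of the driver, excluded from the
 * build): informing the driver of a non-default base clock frequency using
 * the MGSL_MODE_BASE_CLOCK special case handled in set_params() above. Only
 * the mode and clock_speed fields are examined in that case; the 32 MHz
 * value below is just an example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/synclink.h>

static int set_base_clock(int fd, unsigned int hz)
{
	MGSL_PARAMS params;

	/* fetch current parameters so other fields hold sane values */
	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
		return -1;
	params.mode = MGSL_MODE_BASE_CLOCK;	/* special mode: only clock_speed is used */
	params.clock_speed = hz;		/* e.g. 32000000 for a 32 MHz oscillator */
	return ioctl(fd, MGSL_IOCSPARAMS, &params);
}
#endif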
2644
2645static int get_txidle(struct slgt_info *info, int __user *idle_mode)
2646{
2647 DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
2648 if (put_user(info->idle_mode, idle_mode))
2649 return -EFAULT;
2650 return 0;
2651}
2652
2653static int set_txidle(struct slgt_info *info, int idle_mode)
2654{
2655 unsigned long flags;
2656 DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
2657 spin_lock_irqsave(&info->lock,flags);
2658 info->idle_mode = idle_mode;
Paul Fulghum643f3312006-06-25 05:49:20 -07002659 if (info->params.mode != MGSL_MODE_ASYNC)
2660 tx_set_idle(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08002661 spin_unlock_irqrestore(&info->lock,flags);
2662 return 0;
2663}
2664
2665static int tx_enable(struct slgt_info *info, int enable)
2666{
2667 unsigned long flags;
2668 DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
2669 spin_lock_irqsave(&info->lock,flags);
2670 if (enable) {
2671 if (!info->tx_enabled)
2672 tx_start(info);
2673 } else {
2674 if (info->tx_enabled)
2675 tx_stop(info);
2676 }
2677 spin_unlock_irqrestore(&info->lock,flags);
2678 return 0;
2679}
2680
2681/*
2682 * abort transmit HDLC frame
2683 */
2684static int tx_abort(struct slgt_info *info)
2685{
2686 unsigned long flags;
2687 DBGINFO(("%s tx_abort\n", info->device_name));
2688 spin_lock_irqsave(&info->lock,flags);
2689 tdma_reset(info);
2690 spin_unlock_irqrestore(&info->lock,flags);
2691 return 0;
2692}
2693
2694static int rx_enable(struct slgt_info *info, int enable)
2695{
2696 unsigned long flags;
Paul Fulghum814dae02008-07-22 11:22:14 +01002697 unsigned int rbuf_fill_level;
2698 DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
Paul Fulghum705b6c72006-01-08 01:02:06 -08002699 spin_lock_irqsave(&info->lock,flags);
Paul Fulghum814dae02008-07-22 11:22:14 +01002700 /*
2701 * enable[31..16] = receive DMA buffer fill level
2702 * 0 = noop (leave fill level unchanged)
2703 * fill level must be multiple of 4 and <= buffer size
2704 */
2705 rbuf_fill_level = ((unsigned int)enable) >> 16;
2706 if (rbuf_fill_level) {
Paul Fulghumc68a99c2008-07-22 11:23:24 +01002707 if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
2708 spin_unlock_irqrestore(&info->lock, flags);
Paul Fulghum814dae02008-07-22 11:22:14 +01002709 return -EINVAL;
Paul Fulghumc68a99c2008-07-22 11:23:24 +01002710 }
Paul Fulghum814dae02008-07-22 11:22:14 +01002711 info->rbuf_fill_level = rbuf_fill_level;
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01002712 if (rbuf_fill_level < 128)
2713 info->rx_pio = 1; /* PIO mode */
2714 else
2715 info->rx_pio = 0; /* DMA mode */
Paul Fulghum814dae02008-07-22 11:22:14 +01002716 rx_stop(info); /* restart receiver to use new fill level */
2717 }
2718
2719 /*
2720 * enable[1..0] = receiver enable command
2721 * 0 = disable
2722 * 1 = enable
2723 * 2 = enable or force hunt mode if already enabled
2724 */
2725 enable &= 3;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002726 if (enable) {
2727 if (!info->rx_enabled)
2728 rx_start(info);
Paul Fulghumcb10dc92006-09-30 23:27:45 -07002729 else if (enable == 2) {
2730 /* force hunt mode (write 1 to RCR[3]) */
2731 wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
2732 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08002733 } else {
2734 if (info->rx_enabled)
2735 rx_stop(info);
2736 }
2737 spin_unlock_irqrestore(&info->lock,flags);
2738 return 0;
2739}
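/*
 * Illustrative user-space sketch (not part of the driver, excluded from the
 * build): building the MGSL_IOCRXENABLE argument using the layout documented
 * in rx_enable() above: bits [31..16] carry the DMA buffer fill level (0 to
 * leave it unchanged), bits [1..0] carry the enable command. The 64-byte
 * fill level is just an example (multiple of 4, below 128 selects PIO mode).
 */
#if 0
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int enable_receiver(int fd, unsigned int fill_level)
{
	int arg = (int)((fill_level << 16) | 1);	/* fill level + enable command 1 */
	return ioctl(fd, MGSL_IOCRXENABLE, arg);
}
/* usage: enable_receiver(fd, 64);
 * force hunt mode on an already enabled receiver: ioctl(fd, MGSL_IOCRXENABLE, 2);
 */
#endif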
2740
2741/*
2742 * wait for specified event to occur
2743 */
2744static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
2745{
2746 unsigned long flags;
2747 int s;
2748 int rc=0;
2749 struct mgsl_icount cprev, cnow;
2750 int events;
2751 int mask;
2752 struct _input_signal_events oldsigs, newsigs;
2753 DECLARE_WAITQUEUE(wait, current);
2754
2755 if (get_user(mask, mask_ptr))
2756 return -EFAULT;
2757
2758 DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
2759
2760 spin_lock_irqsave(&info->lock,flags);
2761
2762 /* return immediately if state matches requested events */
2763 get_signals(info);
2764 s = info->signals;
2765
2766 events = mask &
2767 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2768 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2769 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2770 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2771 if (events) {
2772 spin_unlock_irqrestore(&info->lock,flags);
2773 goto exit;
2774 }
2775
2776 /* save current irq counts */
2777 cprev = info->icount;
2778 oldsigs = info->input_signal_events;
2779
2780 /* enable hunt and idle irqs if needed */
2781 if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
2782 unsigned short val = rd_reg16(info, SCR);
2783 if (!(val & IRQ_RXIDLE))
2784 wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
2785 }
2786
2787 set_current_state(TASK_INTERRUPTIBLE);
2788 add_wait_queue(&info->event_wait_q, &wait);
2789
2790 spin_unlock_irqrestore(&info->lock,flags);
2791
2792 for(;;) {
2793 schedule();
2794 if (signal_pending(current)) {
2795 rc = -ERESTARTSYS;
2796 break;
2797 }
2798
2799 /* get current irq counts */
2800 spin_lock_irqsave(&info->lock,flags);
2801 cnow = info->icount;
2802 newsigs = info->input_signal_events;
2803 set_current_state(TASK_INTERRUPTIBLE);
2804 spin_unlock_irqrestore(&info->lock,flags);
2805
2806 /* if no change, wait aborted for some reason */
2807 if (newsigs.dsr_up == oldsigs.dsr_up &&
2808 newsigs.dsr_down == oldsigs.dsr_down &&
2809 newsigs.dcd_up == oldsigs.dcd_up &&
2810 newsigs.dcd_down == oldsigs.dcd_down &&
2811 newsigs.cts_up == oldsigs.cts_up &&
2812 newsigs.cts_down == oldsigs.cts_down &&
2813 newsigs.ri_up == oldsigs.ri_up &&
2814 newsigs.ri_down == oldsigs.ri_down &&
2815 cnow.exithunt == cprev.exithunt &&
2816 cnow.rxidle == cprev.rxidle) {
2817 rc = -EIO;
2818 break;
2819 }
2820
2821 events = mask &
2822 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2823 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2824 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2825 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2826 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2827 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2828 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2829 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2830 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2831 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2832 if (events)
2833 break;
2834
2835 cprev = cnow;
2836 oldsigs = newsigs;
2837 }
2838
2839 remove_wait_queue(&info->event_wait_q, &wait);
2840 set_current_state(TASK_RUNNING);
2841
2842
2843 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2844 spin_lock_irqsave(&info->lock,flags);
2845 if (!waitqueue_active(&info->event_wait_q)) {
2846 /* disable exit hunt mode/idle rcvd IRQs */
2847 wr_reg16(info, SCR,
2848 (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
2849 }
2850 spin_unlock_irqrestore(&info->lock,flags);
2851 }
2852exit:
2853 if (rc == 0)
2854 rc = put_user(events, mask_ptr);
2855 return rc;
2856}
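/*
 * Illustrative user-space sketch (not part of the driver, excluded from the
 * build): waiting for serial events through MGSL_IOCWAITEVENT, which is
 * serviced by wait_mgsl_event() above. The chosen event mask is just an
 * example; on success the mask is rewritten with the events that occurred.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int wait_for_dcd_or_frame(int fd)
{
	int mask = MgslEvent_DcdActive | MgslEvent_ExitHuntMode;

	if (ioctl(fd, MGSL_IOCWAITEVENT, &mask) < 0)
		return -1;	/* interrupted or wait aborted */
	return mask;		/* events that ended the wait */
}
#endif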
2857
2858static int get_interface(struct slgt_info *info, int __user *if_mode)
2859{
2860 DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
2861 if (put_user(info->if_mode, if_mode))
2862 return -EFAULT;
2863 return 0;
2864}
2865
2866static int set_interface(struct slgt_info *info, int if_mode)
2867{
2868 unsigned long flags;
Paul Fulghum35fbd392006-01-18 17:42:24 -08002869 unsigned short val;
Paul Fulghum705b6c72006-01-08 01:02:06 -08002870
2871 DBGINFO(("%s set_interface=%x\n", info->device_name, if_mode));
2872 spin_lock_irqsave(&info->lock,flags);
2873 info->if_mode = if_mode;
2874
2875 msc_set_vcr(info);
2876
2877 /* TCR (tx control) 07 1=RTS driver control */
2878 val = rd_reg16(info, TCR);
2879 if (info->if_mode & MGSL_INTERFACE_RTS_EN)
2880 val |= BIT7;
2881 else
2882 val &= ~BIT7;
2883 wr_reg16(info, TCR, val);
2884
2885 spin_unlock_irqrestore(&info->lock,flags);
2886 return 0;
2887}
2888
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002889/*
2890 * set general purpose IO pin state and direction
2891 *
2892 * user_gpio fields:
2893 * state each bit indicates a pin state
2894 * smask set bit indicates pin state to set
2895 * dir each bit indicates a pin direction (0=input, 1=output)
2896 * dmask set bit indicates pin direction to set
2897 */
2898static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2899{
2900 unsigned long flags;
2901 struct gpio_desc gpio;
2902 __u32 data;
2903
2904 if (!info->gpio_present)
2905 return -EINVAL;
2906 if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2907 return -EFAULT;
2908 DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
2909 info->device_name, gpio.state, gpio.smask,
2910 gpio.dir, gpio.dmask));
2911
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002912 spin_lock_irqsave(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002913 if (gpio.dmask) {
2914 data = rd_reg32(info, IODR);
2915 data |= gpio.dmask & gpio.dir;
2916 data &= ~(gpio.dmask & ~gpio.dir);
2917 wr_reg32(info, IODR, data);
2918 }
2919 if (gpio.smask) {
2920 data = rd_reg32(info, IOVR);
2921 data |= gpio.smask & gpio.state;
2922 data &= ~(gpio.smask & ~gpio.state);
2923 wr_reg32(info, IOVR, data);
2924 }
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07002925 spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08002926
2927 return 0;
2928}
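/*
 * Illustrative user-space sketch (not part of the driver, excluded from the
 * build): filling in a gpio_desc for MGSL_IOCSGPIO as described above
 * set_gpio(): dmask/smask select which pins change, dir/state supply the new
 * direction and level. Pin 0 is used here purely as an example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int drive_gpio0_high(int fd)
{
	struct gpio_desc gpio;

	gpio.dmask = 1 << 0;	/* change direction of pin 0 only */
	gpio.dir   = 1 << 0;	/* 1 = output */
	gpio.smask = 1 << 0;	/* change state of pin 0 only */
	gpio.state = 1 << 0;	/* drive it high */
	return ioctl(fd, MGSL_IOCSGPIO, &gpio);
}
#endif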
2929
2930/*
2931 * get general purpose IO pin state and direction
2932 */
2933static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2934{
2935 struct gpio_desc gpio;
2936 if (!info->gpio_present)
2937 return -EINVAL;
2938 gpio.state = rd_reg32(info, IOVR);
2939 gpio.smask = 0xffffffff;
2940 gpio.dir = rd_reg32(info, IODR);
2941 gpio.dmask = 0xffffffff;
2942 if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2943 return -EFAULT;
2944 DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
2945 info->device_name, gpio.state, gpio.dir));
2946 return 0;
2947}
2948
2949/*
2950 * conditional wait facility
2951 */
2952static void init_cond_wait(struct cond_wait *w, unsigned int data)
2953{
2954 init_waitqueue_head(&w->q);
2955 init_waitqueue_entry(&w->wait, current);
2956 w->data = data;
2957}
2958
2959static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
2960{
2961 set_current_state(TASK_INTERRUPTIBLE);
2962 add_wait_queue(&w->q, &w->wait);
2963 w->next = *head;
2964 *head = w;
2965}
2966
2967static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
2968{
2969 struct cond_wait *w, *prev;
2970 remove_wait_queue(&cw->q, &cw->wait);
2971 set_current_state(TASK_RUNNING);
2972 for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
2973 if (w == cw) {
2974 if (prev != NULL)
2975 prev->next = w->next;
2976 else
2977 *head = w->next;
2978 break;
2979 }
2980 }
2981}
2982
2983static void flush_cond_wait(struct cond_wait **head)
2984{
2985 while (*head != NULL) {
2986 wake_up_interruptible(&(*head)->q);
2987 *head = (*head)->next;
2988 }
2989}
2990
2991/*
2992 * wait for general purpose I/O pin(s) to enter specified state
2993 *
2994 * user_gpio fields:
2995 * state - bit indicates target pin state
2996 * smask - set bit indicates watched pin
2997 *
2998 * The wait ends when at least one watched pin enters the specified
2999 * state. When 0 (no error) is returned, user_gpio->state is set to the
3000 * state of all GPIO pins when the wait ends.
3001 *
3002 * Note: Each pin may be a dedicated input, dedicated output, or
3003 * configurable input/output. The number and configuration of pins
3004 * varies with the specific adapter model. Only input pins (dedicated
3005 * or configured) can be monitored with this function.
3006 */
3007static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
3008{
3009 unsigned long flags;
3010 int rc = 0;
3011 struct gpio_desc gpio;
3012 struct cond_wait wait;
3013 u32 state;
3014
3015 if (!info->gpio_present)
3016 return -EINVAL;
3017 if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
3018 return -EFAULT;
3019 DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
3020 info->device_name, gpio.state, gpio.smask));
3021 /* ignore output pins identified by set IODR bit */
3022 if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
3023 return -EINVAL;
3024 init_cond_wait(&wait, gpio.smask);
3025
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07003026 spin_lock_irqsave(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003027 /* enable interrupts for watched pins */
3028 wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
3029 /* get current pin states */
3030 state = rd_reg32(info, IOVR);
3031
3032 if (gpio.smask & ~(state ^ gpio.state)) {
3033 /* already in target state */
3034 gpio.state = state;
3035 } else {
3036 /* wait for target state */
3037 add_cond_wait(&info->gpio_wait_q, &wait);
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07003038 spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003039 schedule();
3040 if (signal_pending(current))
3041 rc = -ERESTARTSYS;
3042 else
3043 gpio.state = wait.data;
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07003044 spin_lock_irqsave(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003045 remove_cond_wait(&info->gpio_wait_q, &wait);
3046 }
3047
3048 /* disable all GPIO interrupts if no waiting processes */
3049 if (info->gpio_wait_q == NULL)
3050 wr_reg32(info, IOER, 0);
Paul Fulghumffd7d6b2010-10-27 15:34:20 -07003051 spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003052
3053 if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
3054 rc = -EFAULT;
3055 return rc;
3056}
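/*
 * Illustrative userspace sketch (not part of the driver): the three GPIO
 * handlers above are normally reached through the ioctl codes declared in
 * <linux/synclink.h> (MGSL_IOCSGPIO, MGSL_IOCGGPIO, MGSL_IOCWAITGPIO).
 * Assuming those definitions, configuring pin 0 as an output driven high
 * and then waiting for input pin 1 to go high might look like:
 *
 *	struct gpio_desc gpio;
 *
 *	gpio.dmask = 0x01; gpio.dir   = 0x01;
 *	gpio.smask = 0x01; gpio.state = 0x01;
 *	ioctl(fd, MGSL_IOCSGPIO, &gpio);
 *
 *	gpio.smask = 0x02; gpio.state = 0x02;
 *	ioctl(fd, MGSL_IOCWAITGPIO, &gpio);
 */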
3057
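/*
 * wait for a change in one of the modem input signals (RNG/DSR/DCD/CTS)
 * selected by arg; this backs the TIOCMIWAIT ioctl and uses the interrupt
 * counters to detect the change
 */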
Paul Fulghum705b6c72006-01-08 01:02:06 -08003058static int modem_input_wait(struct slgt_info *info,int arg)
3059{
3060 unsigned long flags;
3061 int rc;
3062 struct mgsl_icount cprev, cnow;
3063 DECLARE_WAITQUEUE(wait, current);
3064
3065 /* save current irq counts */
3066 spin_lock_irqsave(&info->lock,flags);
3067 cprev = info->icount;
3068 add_wait_queue(&info->status_event_wait_q, &wait);
3069 set_current_state(TASK_INTERRUPTIBLE);
3070 spin_unlock_irqrestore(&info->lock,flags);
3071
3072 for(;;) {
3073 schedule();
3074 if (signal_pending(current)) {
3075 rc = -ERESTARTSYS;
3076 break;
3077 }
3078
3079 /* get new irq counts */
3080 spin_lock_irqsave(&info->lock,flags);
3081 cnow = info->icount;
3082 set_current_state(TASK_INTERRUPTIBLE);
3083 spin_unlock_irqrestore(&info->lock,flags);
3084
3085 /* if no change, wait aborted for some reason */
3086 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
3087 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
3088 rc = -EIO;
3089 break;
3090 }
3091
3092 /* check for change in caller specified modem input */
3093 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
3094 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
3095 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
3096 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
3097 rc = 0;
3098 break;
3099 }
3100
3101 cprev = cnow;
3102 }
3103 remove_wait_queue(&info->status_event_wait_q, &wait);
3104 set_current_state(TASK_RUNNING);
3105 return rc;
3106}
3107
3108/*
3109 * return state of serial control and status signals
3110 */
3111static int tiocmget(struct tty_struct *tty, struct file *file)
3112{
3113 struct slgt_info *info = tty->driver_data;
3114 unsigned int result;
3115 unsigned long flags;
3116
3117 spin_lock_irqsave(&info->lock,flags);
3118 get_signals(info);
3119 spin_unlock_irqrestore(&info->lock,flags);
3120
3121 result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
3122 ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
3123 ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
3124 ((info->signals & SerialSignal_RI) ? TIOCM_RNG:0) +
3125 ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
3126 ((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
3127
3128 DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
3129 return result;
3130}
3131
3132/*
3133 * set modem control signals (DTR/RTS)
3134 *
 3135 * cmd signal command: TIOCMBIS = set bit, TIOCMBIC = clear bit,
3136 * TIOCMSET = set/clear signal values
3137 * value bit mask for command
3138 */
3139static int tiocmset(struct tty_struct *tty, struct file *file,
3140 unsigned int set, unsigned int clear)
3141{
3142 struct slgt_info *info = tty->driver_data;
3143 unsigned long flags;
3144
3145 DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
3146
3147 if (set & TIOCM_RTS)
3148 info->signals |= SerialSignal_RTS;
3149 if (set & TIOCM_DTR)
3150 info->signals |= SerialSignal_DTR;
3151 if (clear & TIOCM_RTS)
3152 info->signals &= ~SerialSignal_RTS;
3153 if (clear & TIOCM_DTR)
3154 info->signals &= ~SerialSignal_DTR;
3155
3156 spin_lock_irqsave(&info->lock,flags);
3157 set_signals(info);
3158 spin_unlock_irqrestore(&info->lock,flags);
3159 return 0;
3160}
3161
Alan Cox31f35932009-01-02 13:45:05 +00003162static int carrier_raised(struct tty_port *port)
3163{
3164 unsigned long flags;
3165 struct slgt_info *info = container_of(port, struct slgt_info, port);
3166
3167 spin_lock_irqsave(&info->lock,flags);
3168 get_signals(info);
3169 spin_unlock_irqrestore(&info->lock,flags);
3170 return (info->signals & SerialSignal_DCD) ? 1 : 0;
3171}
3172
Alan Coxfcc8ac12009-06-11 12:24:17 +01003173static void dtr_rts(struct tty_port *port, int on)
Alan Cox5d951fb2009-01-02 13:45:19 +00003174{
3175 unsigned long flags;
3176 struct slgt_info *info = container_of(port, struct slgt_info, port);
3177
3178 spin_lock_irqsave(&info->lock,flags);
Alan Coxfcc8ac12009-06-11 12:24:17 +01003179 if (on)
3180 info->signals |= SerialSignal_RTS + SerialSignal_DTR;
3181 else
3182 info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
Alan Cox5d951fb2009-01-02 13:45:19 +00003183 set_signals(info);
3184 spin_unlock_irqrestore(&info->lock,flags);
3185}
3186
3187
Paul Fulghum705b6c72006-01-08 01:02:06 -08003188/*
3189 * block current process until the device is ready to open
3190 */
3191static int block_til_ready(struct tty_struct *tty, struct file *filp,
3192 struct slgt_info *info)
3193{
3194 DECLARE_WAITQUEUE(wait, current);
3195 int retval;
Joe Perches0fab6de2008-04-28 02:14:02 -07003196 bool do_clocal = false;
3197 bool extra_count = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003198 unsigned long flags;
Alan Cox31f35932009-01-02 13:45:05 +00003199 int cd;
3200 struct tty_port *port = &info->port;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003201
3202 DBGINFO(("%s block_til_ready\n", tty->driver->name));
3203
3204 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3205 /* nonblock mode is set or port is not enabled */
Alan Cox31f35932009-01-02 13:45:05 +00003206 port->flags |= ASYNC_NORMAL_ACTIVE;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003207 return 0;
3208 }
3209
3210 if (tty->termios->c_cflag & CLOCAL)
Joe Perches0fab6de2008-04-28 02:14:02 -07003211 do_clocal = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003212
3213 /* Wait for carrier detect and the line to become
3214 * free (i.e., not in use by the callout). While we are in
Alan Cox31f35932009-01-02 13:45:05 +00003215 * this loop, port->count is dropped by one, so that
Paul Fulghum705b6c72006-01-08 01:02:06 -08003216 * close() knows when to free things. We restore it upon
3217 * exit, either normal or abnormal.
3218 */
3219
3220 retval = 0;
Alan Cox31f35932009-01-02 13:45:05 +00003221 add_wait_queue(&port->open_wait, &wait);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003222
3223 spin_lock_irqsave(&info->lock, flags);
3224 if (!tty_hung_up_p(filp)) {
Joe Perches0fab6de2008-04-28 02:14:02 -07003225 extra_count = true;
Alan Cox31f35932009-01-02 13:45:05 +00003226 port->count--;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003227 }
3228 spin_unlock_irqrestore(&info->lock, flags);
Alan Cox31f35932009-01-02 13:45:05 +00003229 port->blocked_open++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003230
3231 while (1) {
Alan Cox5d951fb2009-01-02 13:45:19 +00003232 if ((tty->termios->c_cflag & CBAUD))
3233 tty_port_raise_dtr_rts(port);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003234
3235 set_current_state(TASK_INTERRUPTIBLE);
3236
Alan Cox31f35932009-01-02 13:45:05 +00003237 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3238 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
Paul Fulghum705b6c72006-01-08 01:02:06 -08003239 -EAGAIN : -ERESTARTSYS;
3240 break;
3241 }
3242
Alan Cox31f35932009-01-02 13:45:05 +00003243 cd = tty_port_carrier_raised(port);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003244
Alan Cox31f35932009-01-02 13:45:05 +00003245 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd ))
Paul Fulghum705b6c72006-01-08 01:02:06 -08003246 break;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003247
3248 if (signal_pending(current)) {
3249 retval = -ERESTARTSYS;
3250 break;
3251 }
3252
3253 DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
Arnd Bergmanne142a312010-06-01 22:53:10 +02003254 tty_unlock();
Paul Fulghum705b6c72006-01-08 01:02:06 -08003255 schedule();
Arnd Bergmanne142a312010-06-01 22:53:10 +02003256 tty_lock();
Paul Fulghum705b6c72006-01-08 01:02:06 -08003257 }
3258
3259 set_current_state(TASK_RUNNING);
Alan Cox31f35932009-01-02 13:45:05 +00003260 remove_wait_queue(&port->open_wait, &wait);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003261
3262 if (extra_count)
Alan Cox31f35932009-01-02 13:45:05 +00003263 port->count++;
3264 port->blocked_open--;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003265
3266 if (!retval)
Alan Cox31f35932009-01-02 13:45:05 +00003267 port->flags |= ASYNC_NORMAL_ACTIVE;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003268
3269 DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
3270 return retval;
3271}
3272
3273static int alloc_tmp_rbuf(struct slgt_info *info)
3274{
Paul Fulghum04b374d2006-06-25 05:49:21 -07003275 info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003276 if (info->tmp_rbuf == NULL)
3277 return -ENOMEM;
3278 return 0;
3279}
3280
3281static void free_tmp_rbuf(struct slgt_info *info)
3282{
3283 kfree(info->tmp_rbuf);
3284 info->tmp_rbuf = NULL;
3285}
3286
3287/*
3288 * allocate DMA descriptor lists.
3289 */
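/*
 * The single DMA-coherent block holds rbuf_count receive descriptors
 * followed by tbuf_count transmit descriptors; each list is made circular
 * through the little-endian .next physical-address links built below.
 */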
3290static int alloc_desc(struct slgt_info *info)
3291{
3292 unsigned int i;
3293 unsigned int pbufs;
3294
3295 /* allocate memory to hold descriptor lists */
3296 info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
3297 if (info->bufs == NULL)
3298 return -ENOMEM;
3299
3300 memset(info->bufs, 0, DESC_LIST_SIZE);
3301
3302 info->rbufs = (struct slgt_desc*)info->bufs;
3303 info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
3304
3305 pbufs = (unsigned int)info->bufs_dma_addr;
3306
3307 /*
3308 * Build circular lists of descriptors
3309 */
3310
3311 for (i=0; i < info->rbuf_count; i++) {
3312 /* physical address of this descriptor */
3313 info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
3314
3315 /* physical address of next descriptor */
3316 if (i == info->rbuf_count - 1)
3317 info->rbufs[i].next = cpu_to_le32(pbufs);
3318 else
3319 info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
3320 set_desc_count(info->rbufs[i], DMABUFSIZE);
3321 }
3322
3323 for (i=0; i < info->tbuf_count; i++) {
3324 /* physical address of this descriptor */
3325 info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
3326
3327 /* physical address of next descriptor */
3328 if (i == info->tbuf_count - 1)
3329 info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
3330 else
3331 info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
3332 }
3333
3334 return 0;
3335}
3336
3337static void free_desc(struct slgt_info *info)
3338{
3339 if (info->bufs != NULL) {
3340 pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
3341 info->bufs = NULL;
3342 info->rbufs = NULL;
3343 info->tbufs = NULL;
3344 }
3345}
3346
3347static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3348{
3349 int i;
3350 for (i=0; i < count; i++) {
3351 if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
3352 return -ENOMEM;
3353 bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
3354 }
3355 return 0;
3356}
3357
3358static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3359{
3360 int i;
3361 for (i=0; i < count; i++) {
3362 if (bufs[i].buf == NULL)
3363 continue;
3364 pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
3365 bufs[i].buf = NULL;
3366 }
3367}
3368
3369static int alloc_dma_bufs(struct slgt_info *info)
3370{
3371 info->rbuf_count = 32;
3372 info->tbuf_count = 32;
3373
3374 if (alloc_desc(info) < 0 ||
3375 alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
3376 alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
3377 alloc_tmp_rbuf(info) < 0) {
3378 DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
3379 return -ENOMEM;
3380 }
3381 reset_rbufs(info);
3382 return 0;
3383}
3384
3385static void free_dma_bufs(struct slgt_info *info)
3386{
3387 if (info->bufs) {
3388 free_bufs(info, info->rbufs, info->rbuf_count);
3389 free_bufs(info, info->tbufs, info->tbuf_count);
3390 free_desc(info);
3391 }
3392 free_tmp_rbuf(info);
3393}
3394
3395static int claim_resources(struct slgt_info *info)
3396{
3397 if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
3398 DBGERR(("%s reg addr conflict, addr=%08X\n",
3399 info->device_name, info->phys_reg_addr));
3400 info->init_error = DiagStatus_AddressConflict;
3401 goto errout;
3402 }
3403 else
Joe Perches0fab6de2008-04-28 02:14:02 -07003404 info->reg_addr_requested = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003405
Alan Cox24cb2332008-04-30 00:54:19 -07003406 info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003407 if (!info->reg_addr) {
 3408 DBGERR(("%s can't map device registers, addr=%08X\n",
3409 info->device_name, info->phys_reg_addr));
3410 info->init_error = DiagStatus_CantAssignPciResources;
3411 goto errout;
3412 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08003413 return 0;
3414
3415errout:
3416 release_resources(info);
3417 return -ENODEV;
3418}
3419
3420static void release_resources(struct slgt_info *info)
3421{
3422 if (info->irq_requested) {
3423 free_irq(info->irq_level, info);
Joe Perches0fab6de2008-04-28 02:14:02 -07003424 info->irq_requested = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003425 }
3426
3427 if (info->reg_addr_requested) {
3428 release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
Joe Perches0fab6de2008-04-28 02:14:02 -07003429 info->reg_addr_requested = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003430 }
3431
3432 if (info->reg_addr) {
Paul Fulghum0c8365e2006-01-11 12:17:39 -08003433 iounmap(info->reg_addr);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003434 info->reg_addr = NULL;
3435 }
3436}
3437
3438/* Add the specified device instance data structure to the
3439 * global linked list of devices and increment the device count.
3440 */
3441static void add_device(struct slgt_info *info)
3442{
3443 char *devstr;
3444
3445 info->next_device = NULL;
3446 info->line = slgt_device_count;
3447 sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
3448
3449 if (info->line < MAX_DEVICES) {
3450 if (maxframe[info->line])
3451 info->max_frame_size = maxframe[info->line];
Paul Fulghum705b6c72006-01-08 01:02:06 -08003452 }
3453
3454 slgt_device_count++;
3455
3456 if (!slgt_device_list)
3457 slgt_device_list = info;
3458 else {
3459 struct slgt_info *current_dev = slgt_device_list;
3460 while(current_dev->next_device)
3461 current_dev = current_dev->next_device;
3462 current_dev->next_device = info;
3463 }
3464
3465 if (info->max_frame_size < 4096)
3466 info->max_frame_size = 4096;
3467 else if (info->max_frame_size > 65535)
3468 info->max_frame_size = 65535;
3469
3470 switch(info->pdev->device) {
3471 case SYNCLINK_GT_DEVICE_ID:
3472 devstr = "GT";
3473 break;
Paul Fulghum6f84be82006-06-25 05:49:22 -07003474 case SYNCLINK_GT2_DEVICE_ID:
3475 devstr = "GT2";
3476 break;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003477 case SYNCLINK_GT4_DEVICE_ID:
3478 devstr = "GT4";
3479 break;
3480 case SYNCLINK_AC_DEVICE_ID:
3481 devstr = "AC";
3482 info->params.mode = MGSL_MODE_ASYNC;
3483 break;
3484 default:
3485 devstr = "(unknown model)";
3486 }
3487 printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
3488 devstr, info->device_name, info->phys_reg_addr,
3489 info->irq_level, info->max_frame_size);
3490
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08003491#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08003492 hdlcdev_init(info);
3493#endif
3494}
3495
Alan Cox31f35932009-01-02 13:45:05 +00003496static const struct tty_port_operations slgt_port_ops = {
3497 .carrier_raised = carrier_raised,
Alan Coxfcc8ac12009-06-11 12:24:17 +01003498 .dtr_rts = dtr_rts,
Alan Cox31f35932009-01-02 13:45:05 +00003499};
3500
Paul Fulghum705b6c72006-01-08 01:02:06 -08003501/*
3502 * allocate device instance structure, return NULL on failure
3503 */
3504static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3505{
3506 struct slgt_info *info;
3507
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07003508 info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003509
3510 if (!info) {
3511 DBGERR(("%s device alloc failed adapter=%d port=%d\n",
3512 driver_name, adapter_num, port_num));
3513 } else {
Alan Cox44b7d1b2008-07-16 21:57:18 +01003514 tty_port_init(&info->port);
Alan Cox31f35932009-01-02 13:45:05 +00003515 info->port.ops = &slgt_port_ops;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003516 info->magic = MGSL_MAGIC;
David Howellsc4028952006-11-22 14:57:56 +00003517 INIT_WORK(&info->task, bh_handler);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003518 info->max_frame_size = 4096;
Paul Fulghum1f807692009-04-02 16:58:30 -07003519 info->base_clock = 14745600;
Paul Fulghum814dae02008-07-22 11:22:14 +01003520 info->rbuf_fill_level = DMABUFSIZE;
Alan Cox44b7d1b2008-07-16 21:57:18 +01003521 info->port.close_delay = 5*HZ/10;
3522 info->port.closing_wait = 30*HZ;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003523 init_waitqueue_head(&info->status_event_wait_q);
3524 init_waitqueue_head(&info->event_wait_q);
3525 spin_lock_init(&info->netlock);
3526 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
3527 info->idle_mode = HDLC_TXIDLE_FLAGS;
3528 info->adapter_num = adapter_num;
3529 info->port_num = port_num;
3530
Jiri Slaby40565f12007-02-12 00:52:31 -08003531 setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
3532 setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003533
3534 /* Copy configuration info to device instance data */
3535 info->pdev = pdev;
3536 info->irq_level = pdev->irq;
3537 info->phys_reg_addr = pci_resource_start(pdev,0);
3538
Paul Fulghum705b6c72006-01-08 01:02:06 -08003539 info->bus_type = MGSL_BUS_TYPE_PCI;
Thomas Gleixner0f2ed4c2006-07-01 19:29:33 -07003540 info->irq_flags = IRQF_SHARED;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003541
3542 info->init_error = -1; /* assume error, set to 0 on successful init */
3543 }
3544
3545 return info;
3546}
3547
3548static void device_init(int adapter_num, struct pci_dev *pdev)
3549{
3550 struct slgt_info *port_array[SLGT_MAX_PORTS];
3551 int i;
3552 int port_count = 1;
3553
Paul Fulghum6f84be82006-06-25 05:49:22 -07003554 if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
3555 port_count = 2;
3556 else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
Paul Fulghum705b6c72006-01-08 01:02:06 -08003557 port_count = 4;
3558
3559 /* allocate device instances for all ports */
3560 for (i=0; i < port_count; ++i) {
3561 port_array[i] = alloc_dev(adapter_num, i, pdev);
3562 if (port_array[i] == NULL) {
3563 for (--i; i >= 0; --i)
3564 kfree(port_array[i]);
3565 return;
3566 }
3567 }
3568
3569 /* give copy of port_array to all ports and add to device list */
3570 for (i=0; i < port_count; ++i) {
3571 memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
3572 add_device(port_array[i]);
3573 port_array[i]->port_count = port_count;
3574 spin_lock_init(&port_array[i]->lock);
3575 }
3576
3577 /* Allocate and claim adapter resources */
3578 if (!claim_resources(port_array[0])) {
3579
3580 alloc_dma_bufs(port_array[0]);
3581
3582 /* copy resource information from first port to others */
3583 for (i = 1; i < port_count; ++i) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08003584 port_array[i]->irq_level = port_array[0]->irq_level;
3585 port_array[i]->reg_addr = port_array[0]->reg_addr;
3586 alloc_dma_bufs(port_array[i]);
3587 }
3588
3589 if (request_irq(port_array[0]->irq_level,
3590 slgt_interrupt,
3591 port_array[0]->irq_flags,
3592 port_array[0]->device_name,
3593 port_array[0]) < 0) {
3594 DBGERR(("%s request_irq failed IRQ=%d\n",
3595 port_array[0]->device_name,
3596 port_array[0]->irq_level));
3597 } else {
Joe Perches0fab6de2008-04-28 02:14:02 -07003598 port_array[0]->irq_requested = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003599 adapter_test(port_array[0]);
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003600 for (i=1 ; i < port_count ; i++) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08003601 port_array[i]->init_error = port_array[0]->init_error;
Paul Fulghum0080b7a2006-03-28 01:56:15 -08003602 port_array[i]->gpio_present = port_array[0]->gpio_present;
3603 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08003604 }
3605 }
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003606
3607 for (i=0; i < port_count; ++i)
3608 tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev));
Paul Fulghum705b6c72006-01-08 01:02:06 -08003609}
3610
3611static int __devinit init_one(struct pci_dev *dev,
3612 const struct pci_device_id *ent)
3613{
3614 if (pci_enable_device(dev)) {
3615 printk("error enabling pci device %p\n", dev);
3616 return -EIO;
3617 }
3618 pci_set_master(dev);
3619 device_init(slgt_device_count, dev);
3620 return 0;
3621}
3622
3623static void __devexit remove_one(struct pci_dev *dev)
3624{
3625}
3626
Jeff Dikeb68e31d2006-10-02 02:17:18 -07003627static const struct tty_operations ops = {
Paul Fulghum705b6c72006-01-08 01:02:06 -08003628 .open = open,
3629 .close = close,
3630 .write = write,
3631 .put_char = put_char,
3632 .flush_chars = flush_chars,
3633 .write_room = write_room,
3634 .chars_in_buffer = chars_in_buffer,
3635 .flush_buffer = flush_buffer,
3636 .ioctl = ioctl,
Paul Fulghum2acdb162007-05-10 22:22:43 -07003637 .compat_ioctl = slgt_compat_ioctl,
Paul Fulghum705b6c72006-01-08 01:02:06 -08003638 .throttle = throttle,
3639 .unthrottle = unthrottle,
3640 .send_xchar = send_xchar,
3641 .break_ctl = set_break,
3642 .wait_until_sent = wait_until_sent,
Paul Fulghum705b6c72006-01-08 01:02:06 -08003643 .set_termios = set_termios,
3644 .stop = tx_hold,
3645 .start = tx_release,
3646 .hangup = hangup,
3647 .tiocmget = tiocmget,
3648 .tiocmset = tiocmset,
Alan Cox05871022010-09-16 18:21:52 +01003649 .get_icount = get_icount,
Alexey Dobriyana18c56e2009-03-31 15:19:19 -07003650 .proc_fops = &synclink_gt_proc_fops,
Paul Fulghum705b6c72006-01-08 01:02:06 -08003651};
3652
3653static void slgt_cleanup(void)
3654{
3655 int rc;
3656 struct slgt_info *info;
3657 struct slgt_info *tmp;
3658
Paul Fulghuma6b2f872009-01-15 13:50:57 -08003659 printk(KERN_INFO "unload %s\n", driver_name);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003660
3661 if (serial_driver) {
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003662 for (info=slgt_device_list ; info != NULL ; info=info->next_device)
3663 tty_unregister_device(serial_driver, info->line);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003664 if ((rc = tty_unregister_driver(serial_driver)))
3665 DBGERR(("tty_unregister_driver error=%d\n", rc));
3666 put_tty_driver(serial_driver);
3667 }
3668
3669 /* reset devices */
3670 info = slgt_device_list;
3671 while(info) {
3672 reset_port(info);
3673 info = info->next_device;
3674 }
3675
3676 /* release devices */
3677 info = slgt_device_list;
3678 while(info) {
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08003679#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08003680 hdlcdev_exit(info);
3681#endif
3682 free_dma_bufs(info);
3683 free_tmp_rbuf(info);
3684 if (info->port_num == 0)
3685 release_resources(info);
3686 tmp = info;
3687 info = info->next_device;
3688 kfree(tmp);
3689 }
3690
3691 if (pci_registered)
3692 pci_unregister_driver(&pci_driver);
3693}
3694
3695/*
3696 * Driver initialization entry point.
3697 */
3698static int __init slgt_init(void)
3699{
3700 int rc;
3701
Paul Fulghuma6b2f872009-01-15 13:50:57 -08003702 printk(KERN_INFO "%s\n", driver_name);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003703
Paul Fulghum705b6c72006-01-08 01:02:06 -08003704 serial_driver = alloc_tty_driver(MAX_DEVICES);
3705 if (!serial_driver) {
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003706 printk("%s can't allocate tty driver\n", driver_name);
3707 return -ENOMEM;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003708 }
3709
3710 /* Initialize the tty_driver structure */
3711
3712 serial_driver->owner = THIS_MODULE;
3713 serial_driver->driver_name = tty_driver_name;
3714 serial_driver->name = tty_dev_prefix;
3715 serial_driver->major = ttymajor;
3716 serial_driver->minor_start = 64;
3717 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
3718 serial_driver->subtype = SERIAL_TYPE_NORMAL;
3719 serial_driver->init_termios = tty_std_termios;
3720 serial_driver->init_termios.c_cflag =
3721 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
Alan Cox606d0992006-12-08 02:38:45 -08003722 serial_driver->init_termios.c_ispeed = 9600;
3723 serial_driver->init_termios.c_ospeed = 9600;
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003724 serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003725 tty_set_operations(serial_driver, &ops);
3726 if ((rc = tty_register_driver(serial_driver)) < 0) {
3727 DBGERR(("%s can't register serial driver\n", driver_name));
3728 put_tty_driver(serial_driver);
3729 serial_driver = NULL;
3730 goto error;
3731 }
3732
Paul Fulghuma6b2f872009-01-15 13:50:57 -08003733 printk(KERN_INFO "%s, tty major#%d\n",
3734 driver_name, serial_driver->major);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003735
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003736 slgt_device_count = 0;
3737 if ((rc = pci_register_driver(&pci_driver)) < 0) {
3738 printk("%s pci_register_driver error=%d\n", driver_name, rc);
3739 goto error;
3740 }
Joe Perches0fab6de2008-04-28 02:14:02 -07003741 pci_registered = true;
Paul Fulghum62eb5b12007-05-08 00:31:48 -07003742
3743 if (!slgt_device_list)
3744 printk("%s no devices found\n",driver_name);
3745
Paul Fulghum705b6c72006-01-08 01:02:06 -08003746 return 0;
3747
3748error:
3749 slgt_cleanup();
3750 return rc;
3751}
3752
3753static void __exit slgt_exit(void)
3754{
3755 slgt_cleanup();
3756}
3757
3758module_init(slgt_init);
3759module_exit(slgt_exit);
3760
3761/*
3762 * register access routines
3763 */
3764
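/*
 * Registers at offsets below 0x80 are common to the adapter; offsets of
 * 0x80 and above address per-port copies spaced 32 bytes apart, so the
 * port number selects the bank.
 */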
3765#define CALC_REGADDR() \
3766 unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
3767 if (addr >= 0x80) \
3768 reg_addr += (info->port_num) * 32;
3769
3770static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
3771{
3772 CALC_REGADDR();
3773 return readb((void __iomem *)reg_addr);
3774}
3775
3776static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
3777{
3778 CALC_REGADDR();
3779 writeb(value, (void __iomem *)reg_addr);
3780}
3781
3782static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
3783{
3784 CALC_REGADDR();
3785 return readw((void __iomem *)reg_addr);
3786}
3787
3788static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
3789{
3790 CALC_REGADDR();
3791 writew(value, (void __iomem *)reg_addr);
3792}
3793
3794static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
3795{
3796 CALC_REGADDR();
3797 return readl((void __iomem *)reg_addr);
3798}
3799
3800static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
3801{
3802 CALC_REGADDR();
3803 writel(value, (void __iomem *)reg_addr);
3804}
3805
3806static void rdma_reset(struct slgt_info *info)
3807{
3808 unsigned int i;
3809
3810 /* set reset bit */
3811 wr_reg32(info, RDCSR, BIT1);
3812
3813 /* wait for enable bit cleared */
3814 for(i=0 ; i < 1000 ; i++)
3815 if (!(rd_reg32(info, RDCSR) & BIT0))
3816 break;
3817}
3818
3819static void tdma_reset(struct slgt_info *info)
3820{
3821 unsigned int i;
3822
3823 /* set reset bit */
3824 wr_reg32(info, TDCSR, BIT1);
3825
3826 /* wait for enable bit cleared */
3827 for(i=0 ; i < 1000 ; i++)
3828 if (!(rd_reg32(info, TDCSR) & BIT0))
3829 break;
3830}
3831
3832/*
3833 * enable internal loopback
3834 * TxCLK and RxCLK are generated from BRG
3835 * and TxD is looped back to RxD internally.
3836 */
3837static void enable_loopback(struct slgt_info *info)
3838{
 3839 /* SCR (serial control) BIT2=loopback enable */
3840 wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
3841
3842 if (info->params.mode != MGSL_MODE_ASYNC) {
3843 /* CCR (clock control)
3844 * 07..05 tx clock source (010 = BRG)
3845 * 04..02 rx clock source (010 = BRG)
3846 * 01 auxclk enable (0 = disable)
3847 * 00 BRG enable (1 = enable)
3848 *
3849 * 0100 1001
3850 */
3851 wr_reg8(info, CCR, 0x49);
3852
3853 /* set speed if available, otherwise use default */
3854 if (info->params.clock_speed)
3855 set_rate(info, info->params.clock_speed);
3856 else
3857 set_rate(info, 3686400);
3858 }
3859}
3860
3861/*
3862 * set baud rate generator to specified rate
3863 */
3864static void set_rate(struct slgt_info *info, u32 rate)
3865{
3866 unsigned int div;
Paul Fulghum1f807692009-04-02 16:58:30 -07003867 unsigned int osc = info->base_clock;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003868
3869 /* div = osc/rate - 1
3870 *
3871 * Round div up if osc/rate is not integer to
3872 * force to next slowest rate.
3873 */
3874
3875 if (rate) {
3876 div = osc/rate;
3877 if (!(osc % rate) && div)
3878 div--;
3879 wr_reg16(info, BDR, (unsigned short)div);
3880 }
3881}
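/*
 * Example: with the default 14745600 Hz base clock and rate = 9600,
 * div = 14745600/9600 - 1 = 1535 and the generator output works out to
 * osc/(div + 1) = 9600 exactly; non-integer ratios round the divisor up,
 * giving the next slower rate.
 */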
3882
3883static void rx_stop(struct slgt_info *info)
3884{
3885 unsigned short val;
3886
3887 /* disable and reset receiver */
3888 val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
3889 wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3890 wr_reg16(info, RCR, val); /* clear reset bit */
3891
3892 slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
3893
3894 /* clear pending rx interrupts */
3895 wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
3896
3897 rdma_reset(info);
3898
Joe Perches0fab6de2008-04-28 02:14:02 -07003899 info->rx_enabled = false;
3900 info->rx_restart = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003901}
3902
3903static void rx_start(struct slgt_info *info)
3904{
3905 unsigned short val;
3906
3907 slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
3908
3909 /* clear pending rx overrun IRQ */
3910 wr_reg16(info, SSR, IRQ_RXOVER);
3911
3912 /* reset and disable receiver */
3913 val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
3914 wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3915 wr_reg16(info, RCR, val); /* clear reset bit */
3916
3917 rdma_reset(info);
3918 reset_rbufs(info);
3919
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01003920 if (info->rx_pio) {
3921 /* rx request when rx FIFO not empty */
3922 wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
3923 slgt_irq_on(info, IRQ_RXDATA);
3924 if (info->params.mode == MGSL_MODE_ASYNC) {
3925 /* enable saving of rx status */
3926 wr_reg32(info, RDCSR, BIT6);
3927 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08003928 } else {
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01003929 /* rx request when rx FIFO half full */
3930 wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
3931 /* set 1st descriptor address */
3932 wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
3933
3934 if (info->params.mode != MGSL_MODE_ASYNC) {
3935 /* enable rx DMA and DMA interrupt */
3936 wr_reg32(info, RDCSR, (BIT2 + BIT0));
3937 } else {
3938 /* enable saving of rx status, rx DMA and DMA interrupt */
3939 wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
3940 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08003941 }
3942
3943 slgt_irq_on(info, IRQ_RXOVER);
3944
3945 /* enable receiver */
3946 wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
3947
Joe Perches0fab6de2008-04-28 02:14:02 -07003948 info->rx_restart = false;
3949 info->rx_enabled = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003950}
3951
3952static void tx_start(struct slgt_info *info)
3953{
3954 if (!info->tx_enabled) {
3955 wr_reg16(info, TCR,
Paul Fulghumcb10dc92006-09-30 23:27:45 -07003956 (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
Joe Perches0fab6de2008-04-28 02:14:02 -07003957 info->tx_enabled = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003958 }
3959
Paul Fulghumde538eb2009-12-09 12:31:39 -08003960 if (desc_count(info->tbufs[info->tbuf_start])) {
Joe Perches0fab6de2008-04-28 02:14:02 -07003961 info->drop_rts_on_tx_done = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003962
3963 if (info->params.mode != MGSL_MODE_ASYNC) {
3964 if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
3965 get_signals(info);
3966 if (!(info->signals & SerialSignal_RTS)) {
3967 info->signals |= SerialSignal_RTS;
3968 set_signals(info);
Joe Perches0fab6de2008-04-28 02:14:02 -07003969 info->drop_rts_on_tx_done = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003970 }
3971 }
3972
3973 slgt_irq_off(info, IRQ_TXDATA);
3974 slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
3975 /* clear tx idle and underrun status bits */
3976 wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
Paul Fulghum705b6c72006-01-08 01:02:06 -08003977 } else {
Paul Fulghum705b6c72006-01-08 01:02:06 -08003978 slgt_irq_off(info, IRQ_TXDATA);
3979 slgt_irq_on(info, IRQ_TXIDLE);
3980 /* clear tx idle status bit */
3981 wr_reg16(info, SSR, IRQ_TXIDLE);
Paul Fulghum705b6c72006-01-08 01:02:06 -08003982 }
Paul Fulghumce892942009-06-24 18:34:51 +01003983 /* set 1st descriptor address and start DMA */
3984 wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
3985 wr_reg32(info, TDCSR, BIT2 + BIT0);
Joe Perches0fab6de2008-04-28 02:14:02 -07003986 info->tx_active = true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08003987 }
3988}
3989
3990static void tx_stop(struct slgt_info *info)
3991{
3992 unsigned short val;
3993
3994 del_timer(&info->tx_timer);
3995
3996 tdma_reset(info);
3997
3998 /* reset and disable transmitter */
3999 val = rd_reg16(info, TCR) & ~BIT1; /* clear enable bit */
4000 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
Paul Fulghum705b6c72006-01-08 01:02:06 -08004001
4002 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
4003
4004 /* clear tx idle and underrun status bit */
4005 wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
4006
4007 reset_tbufs(info);
4008
Joe Perches0fab6de2008-04-28 02:14:02 -07004009 info->tx_enabled = false;
4010 info->tx_active = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004011}
4012
4013static void reset_port(struct slgt_info *info)
4014{
4015 if (!info->reg_addr)
4016 return;
4017
4018 tx_stop(info);
4019 rx_stop(info);
4020
4021 info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
4022 set_signals(info);
4023
4024 slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4025}
4026
4027static void reset_adapter(struct slgt_info *info)
4028{
4029 int i;
4030 for (i=0; i < info->port_count; ++i) {
4031 if (info->port_array[i])
4032 reset_port(info->port_array[i]);
4033 }
4034}
4035
4036static void async_mode(struct slgt_info *info)
4037{
4038 unsigned short val;
4039
4040 slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4041 tx_stop(info);
4042 rx_stop(info);
4043
4044 /* TCR (tx control)
4045 *
4046 * 15..13 mode, 010=async
4047 * 12..10 encoding, 000=NRZ
4048 * 09 parity enable
4049 * 08 1=odd parity, 0=even parity
4050 * 07 1=RTS driver control
4051 * 06 1=break enable
4052 * 05..04 character length
4053 * 00=5 bits
4054 * 01=6 bits
4055 * 10=7 bits
4056 * 11=8 bits
4057 * 03 0=1 stop bit, 1=2 stop bits
4058 * 02 reset
4059 * 01 enable
4060 * 00 auto-CTS enable
4061 */
4062 val = 0x4000;
4063
4064 if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4065 val |= BIT7;
4066
4067 if (info->params.parity != ASYNC_PARITY_NONE) {
4068 val |= BIT9;
4069 if (info->params.parity == ASYNC_PARITY_ODD)
4070 val |= BIT8;
4071 }
4072
4073 switch (info->params.data_bits)
4074 {
4075 case 6: val |= BIT4; break;
4076 case 7: val |= BIT5; break;
4077 case 8: val |= BIT5 + BIT4; break;
4078 }
4079
4080 if (info->params.stop_bits != 1)
4081 val |= BIT3;
4082
4083 if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4084 val |= BIT0;
4085
4086 wr_reg16(info, TCR, val);
4087
4088 /* RCR (rx control)
4089 *
4090 * 15..13 mode, 010=async
4091 * 12..10 encoding, 000=NRZ
4092 * 09 parity enable
4093 * 08 1=odd parity, 0=even parity
4094 * 07..06 reserved, must be 0
4095 * 05..04 character length
4096 * 00=5 bits
4097 * 01=6 bits
4098 * 10=7 bits
4099 * 11=8 bits
4100 * 03 reserved, must be zero
4101 * 02 reset
4102 * 01 enable
4103 * 00 auto-DCD enable
4104 */
4105 val = 0x4000;
4106
4107 if (info->params.parity != ASYNC_PARITY_NONE) {
4108 val |= BIT9;
4109 if (info->params.parity == ASYNC_PARITY_ODD)
4110 val |= BIT8;
4111 }
4112
4113 switch (info->params.data_bits)
4114 {
4115 case 6: val |= BIT4; break;
4116 case 7: val |= BIT5; break;
4117 case 8: val |= BIT5 + BIT4; break;
4118 }
4119
4120 if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4121 val |= BIT0;
4122
4123 wr_reg16(info, RCR, val);
4124
4125 /* CCR (clock control)
4126 *
4127 * 07..05 011 = tx clock source is BRG/16
4128 * 04..02 010 = rx clock source is BRG
4129 * 01 0 = auxclk disabled
4130 * 00 1 = BRG enabled
4131 *
4132 * 0110 1001
4133 */
4134 wr_reg8(info, CCR, 0x69);
4135
4136 msc_set_vcr(info);
4137
Paul Fulghum705b6c72006-01-08 01:02:06 -08004138 /* SCR (serial control)
4139 *
4140 * 15 1=tx req on FIFO half empty
4141 * 14 1=rx req on FIFO half full
4142 * 13 tx data IRQ enable
4143 * 12 tx idle IRQ enable
4144 * 11 rx break on IRQ enable
4145 * 10 rx data IRQ enable
4146 * 09 rx break off IRQ enable
4147 * 08 overrun IRQ enable
4148 * 07 DSR IRQ enable
4149 * 06 CTS IRQ enable
4150 * 05 DCD IRQ enable
4151 * 04 RI IRQ enable
Paul Fulghum1f807692009-04-02 16:58:30 -07004152 * 03 0=16x sampling, 1=8x sampling
Paul Fulghum705b6c72006-01-08 01:02:06 -08004153 * 02 1=txd->rxd internal loopback enable
4154 * 01 reserved, must be zero
4155 * 00 1=master IRQ enable
4156 */
4157 val = BIT15 + BIT14 + BIT0;
Paul Fulghum1f807692009-04-02 16:58:30 -07004158 /* JCR[8] : 1 = x8 async mode feature available */
4159 if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
4160 ((info->base_clock < (info->params.data_rate * 16)) ||
4161 (info->base_clock % (info->params.data_rate * 16)))) {
4162 /* use 8x sampling */
4163 val |= BIT3;
4164 set_rate(info, info->params.data_rate * 8);
4165 } else {
4166 /* use 16x sampling */
4167 set_rate(info, info->params.data_rate * 16);
4168 }
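	/*
	 * e.g. with the default 14745600 Hz base clock, 16x sampling covers
	 * standard rates up to 14745600/16 = 921600 bps; the 8x mode (when
	 * JCR[8] reports the feature) is used for faster rates or for rates
	 * whose 16x multiple does not divide the base clock evenly.
	 */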
Paul Fulghum705b6c72006-01-08 01:02:06 -08004169 wr_reg16(info, SCR, val);
4170
4171 slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
4172
Paul Fulghum705b6c72006-01-08 01:02:06 -08004173 if (info->params.loopback)
4174 enable_loopback(info);
4175}
4176
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004177static void sync_mode(struct slgt_info *info)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004178{
4179 unsigned short val;
4180
4181 slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4182 tx_stop(info);
4183 rx_stop(info);
4184
4185 /* TCR (tx control)
4186 *
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004187 * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
Paul Fulghum705b6c72006-01-08 01:02:06 -08004188 * 12..10 encoding
4189 * 09 CRC enable
4190 * 08 CRC32
4191 * 07 1=RTS driver control
4192 * 06 preamble enable
4193 * 05..04 preamble length
4194 * 03 share open/close flag
4195 * 02 reset
4196 * 01 enable
4197 * 00 auto-CTS enable
4198 */
Paul Fulghum993456c2008-07-22 11:22:04 +01004199 val = BIT2;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004200
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004201 switch(info->params.mode) {
4202 case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4203 case MGSL_MODE_BISYNC: val |= BIT15; break;
4204 case MGSL_MODE_RAW: val |= BIT13; break;
4205 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08004206 if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4207 val |= BIT7;
4208
4209 switch(info->params.encoding)
4210 {
4211 case HDLC_ENCODING_NRZB: val |= BIT10; break;
4212 case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
4213 case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
4214 case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
4215 case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4216 case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4217 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4218 }
4219
Paul Fulghum04b374d2006-06-25 05:49:21 -07004220 switch (info->params.crc_type & HDLC_CRC_MASK)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004221 {
4222 case HDLC_CRC_16_CCITT: val |= BIT9; break;
4223 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4224 }
4225
4226 if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
4227 val |= BIT6;
4228
4229 switch (info->params.preamble_length)
4230 {
4231 case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
4232 case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
4233 case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
4234 }
4235
4236 if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4237 val |= BIT0;
4238
4239 wr_reg16(info, TCR, val);
4240
4241 /* TPR (transmit preamble) */
4242
4243 switch (info->params.preamble)
4244 {
4245 case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
4246 case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
4247 case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
4248 case HDLC_PREAMBLE_PATTERN_10: val = 0x55; break;
4249 case HDLC_PREAMBLE_PATTERN_01: val = 0xaa; break;
4250 default: val = 0x7e; break;
4251 }
4252 wr_reg8(info, TPR, (unsigned char)val);
4253
4254 /* RCR (rx control)
4255 *
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004256 * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
Paul Fulghum705b6c72006-01-08 01:02:06 -08004257 * 12..10 encoding
4258 * 09 CRC enable
4259 * 08 CRC32
4260 * 07..03 reserved, must be 0
4261 * 02 reset
4262 * 01 enable
4263 * 00 auto-DCD enable
4264 */
4265 val = 0;
4266
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004267 switch(info->params.mode) {
4268 case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4269 case MGSL_MODE_BISYNC: val |= BIT15; break;
4270 case MGSL_MODE_RAW: val |= BIT13; break;
4271 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08004272
4273 switch(info->params.encoding)
4274 {
4275 case HDLC_ENCODING_NRZB: val |= BIT10; break;
4276 case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
4277 case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
4278 case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
4279 case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4280 case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4281 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4282 }
4283
Paul Fulghum04b374d2006-06-25 05:49:21 -07004284 switch (info->params.crc_type & HDLC_CRC_MASK)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004285 {
4286 case HDLC_CRC_16_CCITT: val |= BIT9; break;
4287 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4288 }
4289
4290 if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4291 val |= BIT0;
4292
4293 wr_reg16(info, RCR, val);
4294
4295 /* CCR (clock control)
4296 *
4297 * 07..05 tx clock source
4298 * 04..02 rx clock source
4299 * 01 auxclk enable
4300 * 00 BRG enable
4301 */
4302 val = 0;
4303
4304 if (info->params.flags & HDLC_FLAG_TXC_BRG)
4305 {
4306 // when RxC source is DPLL, BRG generates 16X DPLL
4307 // reference clock, so take TxC from BRG/16 to get
4308 // transmit clock at actual data rate
4309 if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4310 val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */
4311 else
4312 val |= BIT6; /* 010, txclk = BRG */
4313 }
4314 else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
4315 val |= BIT7; /* 100, txclk = DPLL Input */
4316 else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4317 val |= BIT5; /* 001, txclk = RXC Input */
4318
4319 if (info->params.flags & HDLC_FLAG_RXC_BRG)
4320 val |= BIT3; /* 010, rxclk = BRG */
4321 else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4322 val |= BIT4; /* 100, rxclk = DPLL */
4323 else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4324 val |= BIT2; /* 001, rxclk = TXC Input */
4325
4326 if (info->params.clock_speed)
4327 val |= BIT1 + BIT0;
4328
4329 wr_reg8(info, CCR, (unsigned char)val);
4330
4331 if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
4332 {
4333 // program DPLL mode
4334 switch(info->params.encoding)
4335 {
4336 case HDLC_ENCODING_BIPHASE_MARK:
4337 case HDLC_ENCODING_BIPHASE_SPACE:
4338 val = BIT7; break;
4339 case HDLC_ENCODING_BIPHASE_LEVEL:
4340 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
4341 val = BIT7 + BIT6; break;
4342 default: val = BIT6; // NRZ encodings
4343 }
4344 wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
4345
4346 // DPLL requires a 16X reference clock from BRG
4347 set_rate(info, info->params.clock_speed * 16);
4348 }
4349 else
4350 set_rate(info, info->params.clock_speed);
4351
4352 tx_set_idle(info);
4353
4354 msc_set_vcr(info);
4355
4356 /* SCR (serial control)
4357 *
4358 * 15 1=tx req on FIFO half empty
4359 * 14 1=rx req on FIFO half full
4360 * 13 tx data IRQ enable
4361 * 12 tx idle IRQ enable
4362 * 11 underrun IRQ enable
4363 * 10 rx data IRQ enable
4364 * 09 rx idle IRQ enable
4365 * 08 overrun IRQ enable
4366 * 07 DSR IRQ enable
4367 * 06 CTS IRQ enable
4368 * 05 DCD IRQ enable
4369 * 04 RI IRQ enable
4370 * 03 reserved, must be zero
4371 * 02 1=txd->rxd internal loopback enable
4372 * 01 reserved, must be zero
4373 * 00 1=master IRQ enable
4374 */
4375 wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
4376
4377 if (info->params.loopback)
4378 enable_loopback(info);
4379}
4380
4381/*
4382 * set transmit idle mode
4383 */
4384static void tx_set_idle(struct slgt_info *info)
4385{
Paul Fulghum643f3312006-06-25 05:49:20 -07004386 unsigned char val;
4387 unsigned short tcr;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004388
Paul Fulghum643f3312006-06-25 05:49:20 -07004389 /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
4390 * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
4391 */
4392 tcr = rd_reg16(info, TCR);
4393 if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
4394 /* disable preamble, set idle size to 16 bits */
4395 tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
4396 /* MSB of 16 bit idle specified in tx preamble register (TPR) */
4397 wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
4398 } else if (!(tcr & BIT6)) {
4399 /* preamble is disabled, set idle size to 8 bits */
4400 tcr &= ~(BIT5 + BIT4);
4401 }
4402 wr_reg16(info, TCR, tcr);
4403
4404 if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
4405 /* LSB of custom tx idle specified in tx idle register */
4406 val = (unsigned char)(info->idle_mode & 0xff);
4407 } else {
4408 /* standard 8 bit idle patterns */
4409 switch(info->idle_mode)
4410 {
4411 case HDLC_TXIDLE_FLAGS: val = 0x7e; break;
4412 case HDLC_TXIDLE_ALT_ZEROS_ONES:
4413 case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
4414 case HDLC_TXIDLE_ZEROS:
4415 case HDLC_TXIDLE_SPACE: val = 0x00; break;
4416 default: val = 0xff;
4417 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08004418 }
4419
4420 wr_reg8(info, TIR, val);
4421}
4422
4423/*
4424 * get state of V24 status (input) signals
4425 */
4426static void get_signals(struct slgt_info *info)
4427{
4428 unsigned short status = rd_reg16(info, SSR);
4429
4430 /* clear all serial signals except DTR and RTS */
4431 info->signals &= SerialSignal_DTR + SerialSignal_RTS;
4432
4433 if (status & BIT3)
4434 info->signals |= SerialSignal_DSR;
4435 if (status & BIT2)
4436 info->signals |= SerialSignal_CTS;
4437 if (status & BIT1)
4438 info->signals |= SerialSignal_DCD;
4439 if (status & BIT0)
4440 info->signals |= SerialSignal_RI;
4441}
4442
4443/*
4444 * set V.24 Control Register based on current configuration
4445 */
4446static void msc_set_vcr(struct slgt_info *info)
4447{
4448 unsigned char val = 0;
4449
4450 /* VCR (V.24 control)
4451 *
4452 * 07..04 serial IF select
4453 * 03 DTR
4454 * 02 RTS
4455 * 01 LL
4456 * 00 RL
4457 */
4458
4459 switch(info->if_mode & MGSL_INTERFACE_MASK)
4460 {
4461 case MGSL_INTERFACE_RS232:
4462 val |= BIT5; /* 0010 */
4463 break;
4464 case MGSL_INTERFACE_V35:
4465 val |= BIT7 + BIT6 + BIT5; /* 1110 */
4466 break;
4467 case MGSL_INTERFACE_RS422:
4468 val |= BIT6; /* 0100 */
4469 break;
4470 }
4471
Paul Fulghume5590712008-07-22 11:21:39 +01004472 if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
4473 val |= BIT4;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004474 if (info->signals & SerialSignal_DTR)
4475 val |= BIT3;
4476 if (info->signals & SerialSignal_RTS)
4477 val |= BIT2;
4478 if (info->if_mode & MGSL_INTERFACE_LL)
4479 val |= BIT1;
4480 if (info->if_mode & MGSL_INTERFACE_RL)
4481 val |= BIT0;
4482 wr_reg8(info, VCR, val);
4483}
4484
4485/*
4486 * set state of V24 control (output) signals
4487 */
4488static void set_signals(struct slgt_info *info)
4489{
4490 unsigned char val = rd_reg8(info, VCR);
4491 if (info->signals & SerialSignal_DTR)
4492 val |= BIT3;
4493 else
4494 val &= ~BIT3;
4495 if (info->signals & SerialSignal_RTS)
4496 val |= BIT2;
4497 else
4498 val &= ~BIT2;
4499 wr_reg8(info, VCR, val);
4500}
4501
4502/*
4503 * free range of receive DMA buffers (i to last)
4504 */
4505static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
4506{
4507 int done = 0;
4508
4509 while(!done) {
4510 /* reset current buffer for reuse */
4511 info->rbufs[i].status = 0;
Paul Fulghum814dae02008-07-22 11:22:14 +01004512 set_desc_count(info->rbufs[i], info->rbuf_fill_level);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004513 if (i == last)
4514 done = 1;
4515 if (++i == info->rbuf_count)
4516 i = 0;
4517 }
4518 info->rbuf_current = i;
4519}
4520
4521/*
4522 * mark all receive DMA buffers as free
4523 */
4524static void reset_rbufs(struct slgt_info *info)
4525{
4526 free_rbufs(info, 0, info->rbuf_count - 1);
Paul Fulghum5ba5a5d2009-06-11 12:28:37 +01004527 info->rbuf_fill_index = 0;
4528 info->rbuf_fill_count = 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004529}
4530
4531/*
4532 * pass receive HDLC frame to upper layer
4533 *
Joe Perches0fab6de2008-04-28 02:14:02 -07004534 * return true if frame available, otherwise false
Paul Fulghum705b6c72006-01-08 01:02:06 -08004535 */
Joe Perches0fab6de2008-04-28 02:14:02 -07004536static bool rx_get_frame(struct slgt_info *info)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004537{
4538 unsigned int start, end;
4539 unsigned short status;
4540 unsigned int framesize = 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004541 unsigned long flags;
Alan Cox8fb06c72008-07-16 21:56:46 +01004542 struct tty_struct *tty = info->port.tty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004543 unsigned char addr_field = 0xff;
Paul Fulghum04b374d2006-06-25 05:49:21 -07004544 unsigned int crc_size = 0;
4545
4546 switch (info->params.crc_type & HDLC_CRC_MASK) {
4547 case HDLC_CRC_16_CCITT: crc_size = 2; break;
4548 case HDLC_CRC_32_CCITT: crc_size = 4; break;
4549 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08004550
4551check_again:
4552
4553 framesize = 0;
4554 addr_field = 0xff;
4555 start = end = info->rbuf_current;
4556
4557 for (;;) {
4558 if (!desc_complete(info->rbufs[end]))
4559 goto cleanup;
4560
4561 if (framesize == 0 && info->params.addr_filter != 0xff)
4562 addr_field = info->rbufs[end].buf[0];
4563
4564 framesize += desc_count(info->rbufs[end]);
4565
4566 if (desc_eof(info->rbufs[end]))
4567 break;
4568
4569 if (++end == info->rbuf_count)
4570 end = 0;
4571
4572 if (end == info->rbuf_current) {
4573 if (info->rx_enabled){
4574 spin_lock_irqsave(&info->lock,flags);
4575 rx_start(info);
4576 spin_unlock_irqrestore(&info->lock,flags);
4577 }
4578 goto cleanup;
4579 }
4580 }
4581
4582 /* status
4583 *
4584 * 15 buffer complete
4585 * 14..06 reserved
4586 * 05..04 residue
4587 * 02 eof (end of frame)
4588 * 01 CRC error
4589 * 00 abort
4590 */
4591 status = desc_status(info->rbufs[end]);
4592
4593 /* ignore CRC bit if not using CRC (bit is undefined) */
Paul Fulghum04b374d2006-06-25 05:49:21 -07004594 if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004595 status &= ~BIT1;
4596
4597 if (framesize == 0 ||
4598 (addr_field != 0xff && addr_field != info->params.addr_filter)) {
4599 free_rbufs(info, start, end);
4600 goto check_again;
4601 }
4602
Paul Fulghum04b374d2006-06-25 05:49:21 -07004603 if (framesize < (2 + crc_size) || status & BIT0) {
4604 info->icount.rxshort++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004605 framesize = 0;
Paul Fulghum04b374d2006-06-25 05:49:21 -07004606 } else if (status & BIT1) {
4607 info->icount.rxcrc++;
4608 if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
4609 framesize = 0;
4610 }
Paul Fulghum705b6c72006-01-08 01:02:06 -08004611
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08004612#if SYNCLINK_GENERIC_HDLC
Paul Fulghum04b374d2006-06-25 05:49:21 -07004613 if (framesize == 0) {
Krzysztof Halasa198191c2008-06-30 23:26:53 +02004614 info->netdev->stats.rx_errors++;
4615 info->netdev->stats.rx_frame_errors++;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004616 }
Paul Fulghum04b374d2006-06-25 05:49:21 -07004617#endif
Paul Fulghum705b6c72006-01-08 01:02:06 -08004618
4619 DBGBH(("%s rx frame status=%04X size=%d\n",
4620 info->device_name, status, framesize));
Paul Fulghum814dae02008-07-22 11:22:14 +01004621 DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
Paul Fulghum705b6c72006-01-08 01:02:06 -08004622
4623 if (framesize) {
Paul Fulghum04b374d2006-06-25 05:49:21 -07004624 if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
4625 framesize -= crc_size;
4626 crc_size = 0;
4627 }
4628
4629 if (framesize > info->max_frame_size + crc_size)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004630 info->icount.rxlong++;
4631 else {
4632 /* copy dma buffer(s) to contiguous temp buffer */
4633 int copy_count = framesize;
4634 int i = start;
4635 unsigned char *p = info->tmp_rbuf;
4636 info->tmp_rbuf_count = framesize;
4637
4638 info->icount.rxok++;
4639
4640 while(copy_count) {
Paul Fulghum814dae02008-07-22 11:22:14 +01004641 int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004642 memcpy(p, info->rbufs[i].buf, partial_count);
4643 p += partial_count;
4644 copy_count -= partial_count;
4645 if (++i == info->rbuf_count)
4646 i = 0;
4647 }
4648
Paul Fulghum04b374d2006-06-25 05:49:21 -07004649 if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
4650 *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
4651 framesize++;
4652 }
4653
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08004654#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08004655 if (info->netcount)
4656 hdlcdev_rx(info,info->tmp_rbuf, framesize);
4657 else
4658#endif
4659 ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
4660 }
4661 }
4662 free_rbufs(info, start, end);
Joe Perches0fab6de2008-04-28 02:14:02 -07004663 return true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004664
4665cleanup:
Joe Perches0fab6de2008-04-28 02:14:02 -07004666 return false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004667}
4668
4669/*
4670 * pass receive buffer (RAW synchronous mode) to tty layer
Joe Perches0fab6de2008-04-28 02:14:02 -07004671 * return true if buffer available, otherwise false
Paul Fulghum705b6c72006-01-08 01:02:06 -08004672 */
Joe Perches0fab6de2008-04-28 02:14:02 -07004673static bool rx_get_buf(struct slgt_info *info)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004674{
4675 unsigned int i = info->rbuf_current;
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004676 unsigned int count;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004677
4678 if (!desc_complete(info->rbufs[i]))
Joe Perches0fab6de2008-04-28 02:14:02 -07004679 return false;
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004680 count = desc_count(info->rbufs[i]);
4681 switch(info->params.mode) {
4682 case MGSL_MODE_MONOSYNC:
4683 case MGSL_MODE_BISYNC:
4684 /* ignore residue in byte synchronous modes */
4685 if (desc_residue(info->rbufs[i]))
4686 count--;
4687 break;
4688 }
4689 DBGDATA(info, info->rbufs[i].buf, count, "rx");
4690 DBGINFO(("rx_get_buf size=%d\n", count));
4691 if (count)
Alan Cox8fb06c72008-07-16 21:56:46 +01004692 ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004693 info->flag_buf, count);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004694 free_rbufs(info, i, i);
Joe Perches0fab6de2008-04-28 02:14:02 -07004695 return true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004696}
4697
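/*
 * reset transmit DMA buffer ring to empty state
 */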
4698static void reset_tbufs(struct slgt_info *info)
4699{
4700 unsigned int i;
4701 info->tbuf_current = 0;
4702 for (i=0 ; i < info->tbuf_count ; i++) {
4703 info->tbufs[i].status = 0;
4704 info->tbufs[i].count = 0;
4705 }
4706}
4707
4708/*
4709 * return number of free transmit DMA buffers
4710 */
4711static unsigned int free_tbuf_count(struct slgt_info *info)
4712{
4713 unsigned int count = 0;
4714 unsigned int i = info->tbuf_current;
4715
4716 do
4717 {
4718 if (desc_count(info->tbufs[i]))
4719 break; /* buffer in use */
4720 ++count;
4721 if (++i == info->tbuf_count)
4722 i=0;
4723 } while (i != info->tbuf_current);
4724
Paul Fulghumbb029c62007-07-31 00:37:35 -07004725 /* if tx DMA active, last zero count buffer is in use */
4726 if (count && (rd_reg32(info, TDCSR) & BIT0))
Paul Fulghum705b6c72006-01-08 01:02:06 -08004727 --count;
4728
4729 return count;
4730}
4731
4732/*
Paul Fulghum403214d2008-07-22 11:21:55 +01004733 * return number of bytes in unsent transmit DMA buffers
4734 * and the serial controller tx FIFO
4735 */
4736static unsigned int tbuf_bytes(struct slgt_info *info)
4737{
4738 unsigned int total_count = 0;
4739 unsigned int i = info->tbuf_current;
4740 unsigned int reg_value;
4741 unsigned int count;
4742 unsigned int active_buf_count = 0;
4743
4744 /*
4745 * Add descriptor counts for all tx DMA buffers.
4746 * If count is zero (cleared by DMA controller after read),
4747 * the buffer is complete or is actively being read from.
4748 *
4749 * Record buf_count of last buffer with zero count starting
4750	 * from current ring position. buf_count is a mirror
4751	 * copy of count and is not cleared by the serial controller.
4752 * If DMA controller is active, that buffer is actively
4753 * being read so add to total.
4754 */
4755 do {
4756 count = desc_count(info->tbufs[i]);
4757 if (count)
4758 total_count += count;
4759 else if (!total_count)
4760 active_buf_count = info->tbufs[i].buf_count;
4761 if (++i == info->tbuf_count)
4762 i = 0;
4763 } while (i != info->tbuf_current);
4764
4765 /* read tx DMA status register */
4766 reg_value = rd_reg32(info, TDCSR);
4767
4768 /* if tx DMA active, last zero count buffer is in use */
4769 if (reg_value & BIT0)
4770 total_count += active_buf_count;
4771
4772 /* add tx FIFO count = reg_value[15..8] */
4773 total_count += (reg_value >> 8) & 0xff;
4774
4775 /* if transmitter active add one byte for shift register */
4776 if (info->tx_active)
4777 total_count++;
4778
4779 return total_count;
4780}
4781
4782/*
Paul Fulghumde538eb2009-12-09 12:31:39 -08004783 * load data into transmit DMA buffer ring and start transmitter if needed
4784 * return true if data accepted, otherwise false (buffers full)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004785 */
Paul Fulghumde538eb2009-12-09 12:31:39 -08004786static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
Paul Fulghum705b6c72006-01-08 01:02:06 -08004787{
4788 unsigned short count;
4789 unsigned int i;
4790 struct slgt_desc *d;
4791
Paul Fulghumde538eb2009-12-09 12:31:39 -08004792 /* check required buffer space */
4793 if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
4794 return false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004795
4796 DBGDATA(info, buf, size, "tx");
4797
Paul Fulghumde538eb2009-12-09 12:31:39 -08004798 /*
4799 * copy data to one or more DMA buffers in circular ring
4800 * tbuf_start = first buffer for this data
4801 * tbuf_current = next free buffer
4802 *
4803 * Copy all data before making data visible to DMA controller by
4804 * setting descriptor count of the first buffer.
4805 * This prevents an active DMA controller from reading the first DMA
4806 * buffers of a frame and stopping before the final buffers are filled.
4807 */
4808
Paul Fulghum705b6c72006-01-08 01:02:06 -08004809 info->tbuf_start = i = info->tbuf_current;
4810
4811 while (size) {
4812 d = &info->tbufs[i];
Paul Fulghum705b6c72006-01-08 01:02:06 -08004813
4814 count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4815 memcpy(d->buf, buf, count);
4816
4817 size -= count;
4818 buf += count;
4819
Paul Fulghumcb10dc92006-09-30 23:27:45 -07004820 /*
4821 * set EOF bit for last buffer of HDLC frame or
4822 * for every buffer in raw mode
4823 */
4824 if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
4825 info->params.mode == MGSL_MODE_RAW)
4826 set_desc_eof(*d, 1);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004827 else
4828 set_desc_eof(*d, 0);
4829
Paul Fulghumde538eb2009-12-09 12:31:39 -08004830 /* set descriptor count for all but first buffer */
4831 if (i != info->tbuf_start)
4832 set_desc_count(*d, count);
Paul Fulghum403214d2008-07-22 11:21:55 +01004833 d->buf_count = count;
Paul Fulghumde538eb2009-12-09 12:31:39 -08004834
4835 if (++i == info->tbuf_count)
4836 i = 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004837 }
4838
4839 info->tbuf_current = i;
Paul Fulghumde538eb2009-12-09 12:31:39 -08004840
4841 /* set first buffer count to make new data visible to DMA controller */
4842 d = &info->tbufs[info->tbuf_start];
4843 set_desc_count(*d, d->buf_count);
4844
4845 /* start transmitter if needed and update transmit timeout */
4846 if (!info->tx_active)
4847 tx_start(info);
4848 update_tx_timer(info);
4849
4850 return true;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004851}
4852
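/*
 * adapter register read/write test
 * write test patterns to two adapter registers and verify readback
 * return 0 on success or -ENODEV on failure
 */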
4853static int register_test(struct slgt_info *info)
4854{
4855 static unsigned short patterns[] =
4856 {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
Kulikov Vasiliy7ea7c6d2010-06-28 15:54:48 +04004857 static unsigned int count = ARRAY_SIZE(patterns);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004858 unsigned int i;
4859 int rc = 0;
4860
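	/* write a different pattern to each register and verify both read back */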
4861 for (i=0 ; i < count ; i++) {
4862 wr_reg16(info, TIR, patterns[i]);
4863 wr_reg16(info, BDR, patterns[(i+1)%count]);
4864 if ((rd_reg16(info, TIR) != patterns[i]) ||
4865 (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
4866 rc = -ENODEV;
4867 break;
4868 }
4869 }
Paul Fulghum0080b7a2006-03-28 01:56:15 -08004870 info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004871	info->init_error = rc ? DiagStatus_AddressFailure : 0;
4872 return rc;
4873}
4874
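/*
 * adapter interrupt test
 * transmit one byte in async mode and verify that the
 * transmit idle interrupt is serviced
 * return 0 on success or -ENODEV on failure
 */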
4875static int irq_test(struct slgt_info *info)
4876{
4877 unsigned long timeout;
4878 unsigned long flags;
Alan Cox8fb06c72008-07-16 21:56:46 +01004879 struct tty_struct *oldtty = info->port.tty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004880 u32 speed = info->params.data_rate;
4881
4882 info->params.data_rate = 921600;
Alan Cox8fb06c72008-07-16 21:56:46 +01004883 info->port.tty = NULL;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004884
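	/* program async mode and enable transmit idle interrupt */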
4885 spin_lock_irqsave(&info->lock, flags);
4886 async_mode(info);
4887 slgt_irq_on(info, IRQ_TXIDLE);
4888
4889 /* enable transmitter */
4890 wr_reg16(info, TCR,
4891 (unsigned short)(rd_reg16(info, TCR) | BIT1));
4892
4893 /* write one byte and wait for tx idle */
4894 wr_reg16(info, TDR, 0);
4895
4896 /* assume failure */
4897 info->init_error = DiagStatus_IrqFailure;
Joe Perches0fab6de2008-04-28 02:14:02 -07004898 info->irq_occurred = false;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004899
4900 spin_unlock_irqrestore(&info->lock, flags);
4901
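	/* wait up to 1 second for transmit idle interrupt */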
4902 timeout=100;
4903 while(timeout-- && !info->irq_occurred)
4904 msleep_interruptible(10);
4905
4906 spin_lock_irqsave(&info->lock,flags);
4907 reset_port(info);
4908 spin_unlock_irqrestore(&info->lock,flags);
4909
4910 info->params.data_rate = speed;
Alan Cox8fb06c72008-07-16 21:56:46 +01004911 info->port.tty = oldtty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004912
4913 info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
4914 return info->irq_occurred ? 0 : -ENODEV;
4915}
4916
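/*
 * copy data bytes from first completed receive buffer to tmp_rbuf,
 * discarding bytes whose paired status byte reports an error
 * return 1 if a completed buffer was processed, otherwise 0
 */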
4917static int loopback_test_rx(struct slgt_info *info)
4918{
4919 unsigned char *src, *dest;
4920 int count;
4921
4922 if (desc_complete(info->rbufs[0])) {
4923 count = desc_count(info->rbufs[0]);
4924 src = info->rbufs[0].buf;
4925 dest = info->tmp_rbuf;
4926
4927 for( ; count ; count-=2, src+=2) {
4928 /* src=data byte (src+1)=status byte */
4929 if (!(*(src+1) & (BIT9 + BIT8))) {
4930 *dest = *src;
4931 dest++;
4932 info->tmp_rbuf_count++;
4933 }
4934 }
4935 DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
4936 return 1;
4937 }
4938 return 0;
4939}
4940
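/*
 * adapter transmit/receive loopback test
 * send a test frame in internal loopback async mode and verify
 * the received data matches the transmitted data
 * return 0 on success or -ENODEV on failure
 */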
4941static int loopback_test(struct slgt_info *info)
4942{
4943#define TESTFRAMESIZE 20
4944
4945 unsigned long timeout;
4946 u16 count = TESTFRAMESIZE;
4947 unsigned char buf[TESTFRAMESIZE];
4948 int rc = -ENODEV;
4949 unsigned long flags;
4950
Alan Cox8fb06c72008-07-16 21:56:46 +01004951 struct tty_struct *oldtty = info->port.tty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004952 MGSL_PARAMS params;
4953
4954 memcpy(&params, &info->params, sizeof(params));
4955
4956 info->params.mode = MGSL_MODE_ASYNC;
4957 info->params.data_rate = 921600;
4958 info->params.loopback = 1;
Alan Cox8fb06c72008-07-16 21:56:46 +01004959 info->port.tty = NULL;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004960
4961 /* build and send transmit frame */
4962 for (count = 0; count < TESTFRAMESIZE; ++count)
4963 buf[count] = (unsigned char)count;
4964
4965 info->tmp_rbuf_count = 0;
4966 memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
4967
4968	/* program hardware for async loopback mode and enable receiver */
4969 spin_lock_irqsave(&info->lock,flags);
4970 async_mode(info);
4971 rx_start(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004972 tx_load(info, buf, count);
Paul Fulghum705b6c72006-01-08 01:02:06 -08004973 spin_unlock_irqrestore(&info->lock, flags);
4974
4975 /* wait for receive complete */
4976 for (timeout = 100; timeout; --timeout) {
4977 msleep_interruptible(10);
4978 if (loopback_test_rx(info)) {
4979 rc = 0;
4980 break;
4981 }
4982 }
4983
4984 /* verify received frame length and contents */
4985 if (!rc && (info->tmp_rbuf_count != count ||
4986 memcmp(buf, info->tmp_rbuf, count))) {
4987 rc = -ENODEV;
4988 }
4989
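	/* reset adapter and restore original parameters */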
4990 spin_lock_irqsave(&info->lock,flags);
4991 reset_adapter(info);
4992 spin_unlock_irqrestore(&info->lock,flags);
4993
4994 memcpy(&info->params, &params, sizeof(info->params));
Alan Cox8fb06c72008-07-16 21:56:46 +01004995 info->port.tty = oldtty;
Paul Fulghum705b6c72006-01-08 01:02:06 -08004996
4997 info->init_error = rc ? DiagStatus_DmaFailure : 0;
4998 return rc;
4999}
5000
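/*
 * perform adapter self tests (register, interrupt, loopback)
 * return 0 if all tests pass, otherwise diagnostic error code
 */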
5001static int adapter_test(struct slgt_info *info)
5002{
5003 DBGINFO(("testing %s\n", info->device_name));
Paul Fulghum294dad02006-06-25 05:49:21 -07005004 if (register_test(info) < 0) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08005005		printk(KERN_ERR "register test failure %s addr=%08X\n",
5006 info->device_name, info->phys_reg_addr);
Paul Fulghum294dad02006-06-25 05:49:21 -07005007 } else if (irq_test(info) < 0) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08005008		printk(KERN_ERR "IRQ test failure %s IRQ=%d\n",
5009 info->device_name, info->irq_level);
Paul Fulghum294dad02006-06-25 05:49:21 -07005010 } else if (loopback_test(info) < 0) {
Paul Fulghum705b6c72006-01-08 01:02:06 -08005011		printk(KERN_ERR "loopback test failure %s\n", info->device_name);
5012 }
5013 return info->init_error;
5014}
5015
5016/*
5017 * transmit timeout handler
5018 */
5019static void tx_timeout(unsigned long context)
5020{
5021 struct slgt_info *info = (struct slgt_info*)context;
5022 unsigned long flags;
5023
5024 DBGINFO(("%s tx_timeout\n", info->device_name));
5025	if (info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
5026 info->icount.txtimeout++;
5027 }
5028 spin_lock_irqsave(&info->lock,flags);
Paul Fulghumce892942009-06-24 18:34:51 +01005029 tx_stop(info);
Paul Fulghum705b6c72006-01-08 01:02:06 -08005030 spin_unlock_irqrestore(&info->lock,flags);
5031
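	/* report transmit completion to network layer or run transmit bottom half */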
Paul Fulghumaf69c7f2006-12-06 20:40:24 -08005032#if SYNCLINK_GENERIC_HDLC
Paul Fulghum705b6c72006-01-08 01:02:06 -08005033 if (info->netcount)
5034 hdlcdev_tx_done(info);
5035 else
5036#endif
5037 bh_transmit(info);
5038}
5039
5040/*
5041 * receive buffer polling timer
5042 */
5043static void rx_timeout(unsigned long context)
5044{
5045 struct slgt_info *info = (struct slgt_info*)context;
5046 unsigned long flags;
5047
5048 DBGINFO(("%s rx_timeout\n", info->device_name));
5049 spin_lock_irqsave(&info->lock, flags);
5050 info->pending_bh |= BH_RECEIVE;
5051 spin_unlock_irqrestore(&info->lock, flags);
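	/* run bottom half to service pending receive work */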
David Howellsc4028952006-11-22 14:57:56 +00005052 bh_handler(&info->task);
Paul Fulghum705b6c72006-01-08 01:02:06 -08005053}
5054