/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov 4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it's very likely that the same bug
 *     will also be in pc87108.c since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb(iobase+BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase+BSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
#undef  CONFIG_USE_INTERNAL_TIMER  /* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP        /* Currently needed */
#define PIO_MAX_SPEED 115200

static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;    /* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static int  w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
int w83977af_open(int i, unsigned int iobase, unsigned int irq,
		  unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __FUNCTION__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 * Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = dev->priv;
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */

	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
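	/*
	 * 14384 matches the formula above for a 2048-byte max receive frame
	 * and what looks like a 7-frame window: (2048 + 6) * 7 + 6 = 14384.
	 */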

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	/* Keep track of module usage */
	SET_MODULE_OWNER(dev);

	/* Override the network functions we need to use */
	dev->hard_start_xmit = w83977af_hard_xmit;
	dev->open            = w83977af_net_open;
	dev->stop            = w83977af_net_close;
	dev->do_ioctl        = w83977af_net_ioctl;
	dev->get_stats       = w83977af_net_get_stats;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __FUNCTION__ , self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i=0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* Should be 0x1? */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 * receive paths IRRX,
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version", __FUNCTION__ );
		}
	}
	return -1;
}

void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

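	/*
	 * For SIR speeds the value written to ABLL below appears to be the
	 * divisor of the 115200 bit/s base rate, e.g. 115200 / 9600 = 12
	 * (0x0c) and 115200 / 19200 = 6 (0x06).
	 */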
	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = (struct w83977af_ir *) dev->priv;

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
		   (int) skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return 0;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
#ifdef CONFIG_USE_INTERNAL_TIMER
		if (mtt > 50) {
			/* Adjust for timer resolution */
			mtt /= 1000+1;

			/* Setup timer */
			switch_bank(iobase, SET4);
			outb(mtt & 0xff, iobase+TMRL);
			outb((mtt >> 8) & 0x0f, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);
			self->io.direction = IO_XMIT;

			/* Enable timer interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_ETMRI, iobase+ICR);
		} else {
#endif
			IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
			if (mtt)
				udelay(mtt);

			/* Enable DMA interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_EDMAI, iobase+ICR);
			w83977af_dma_write(self, iobase);
#ifdef CONFIG_USE_INTERNAL_TIMER
		}
#endif
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Write data to the transmit FIFO using PIO, and return the number of
 *    bytes actually written
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ );

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __FUNCTION__ , fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __FUNCTION__ , fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 *
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );

		self->stats.tx_errors++;
		self->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->stats.tx_packets++;


	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
#ifdef CONFIG_USE_INTERNAL_TIMER
				/* Put this entry back in fifo */
				st_fifo->head--;
				st_fifo->len++;
				st_fifo->entries[st_fifo->head].status = status;
				st_fifo->entries[st_fifo->head].len = len;

				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;	/* I'll be back! */
#else
				udelay(80); /* Should be enough!? */
#endif
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			self->netdev->last_rx = jiffies;
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __FUNCTION__ );
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = dev->priv;

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
	outb(set, iobase+SSR);    /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);
	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = dev->priv;

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
{
	struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;

	return &self->stats;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");


module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");

/*
 * Function init_module (void)
 *
 *    Module entry point
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Module exit point
 *
 */
module_exit(w83977af_cleanup);