blob: 670bb0591217be2e162b9b00df77faaba697572a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Alchemy Semi Au1000 IrDA driver
3 *
4 * Copyright 2001 MontaVista Software Inc.
5 * Author: MontaVista Software, Inc.
6 * ppopov@mvista.com or source@mvista.com
7 *
8 * This program is free software; you can distribute it and/or modify it
9 * under the terms of the GNU General Public License (Version 2) as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/netdevice.h>
26#include <linux/slab.h>
27#include <linux/rtnetlink.h>
28#include <linux/interrupt.h>
29#include <linux/pm.h>
30#include <linux/bitops.h>
31
32#include <asm/irq.h>
33#include <asm/io.h>
34#include <asm/au1000.h>
Manuel Laussf59c8112011-11-10 12:06:22 +000035#if defined(CONFIG_MIPS_DB1000)
Manuel Lauss9bdcf332009-10-04 14:55:24 +020036#include <asm/mach-db1x00/bcsr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#else
38#error au1k_ir: unsupported board
39#endif
40
41#include <net/irda/irda.h>
42#include <net/irda/irmod.h>
43#include <net/irda/wrapper.h>
44#include <net/irda/irda_device.h>
45#include "au1000_ircc.h"
46
47static int au1k_irda_net_init(struct net_device *);
48static int au1k_irda_start(struct net_device *);
49static int au1k_irda_stop(struct net_device *dev);
50static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
51static int au1k_irda_rx(struct net_device *);
David Howells7d12e782006-10-05 14:55:46 +010052static void au1k_irda_interrupt(int, void *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static void au1k_tx_timeout(struct net_device *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070054static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
55static int au1k_irda_set_speed(struct net_device *dev, int speed);
56
57static void *dma_alloc(size_t, dma_addr_t *);
58static void dma_free(void *, size_t);
59
60static int qos_mtt_bits = 0x07; /* 1 ms or more */
61static struct net_device *ir_devs[NUM_IR_IFF];
62static char version[] __devinitdata =
63 "au1k_ircc:1.2 ppopov@mvista.com\n";
64
65#define RUN_AT(x) (jiffies + (x))
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067static DEFINE_SPINLOCK(ir_lock);
68
69/*
70 * IrDA peripheral bug. You have to read the register
71 * twice to get the right value.
72 */
73u32 read_ir_reg(u32 addr)
74{
75 readl(addr);
76 return readl(addr);
77}
78
79
80/*
81 * Buffer allocation/deallocation routines. The buffer descriptor returned
82 * has the virtual and dma address of a buffer suitable for
83 * both, receive and transmit operations.
84 */
85static db_dest_t *GetFreeDB(struct au1k_private *aup)
86{
87 db_dest_t *pDB;
88 pDB = aup->pDBfree;
89
90 if (pDB) {
91 aup->pDBfree = pDB->pnext;
92 }
93 return pDB;
94}
95
96static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
97{
98 db_dest_t *pDBfree = aup->pDBfree;
99 if (pDBfree)
100 pDBfree->pnext = pDB;
101 aup->pDBfree = pDB;
102}
103
104
105/*
106 DMA memory allocation, derived from pci_alloc_consistent.
107 However, the Au1000 data cache is coherent (when programmed
108 so), therefore we return KSEG0 address, not KSEG1.
109*/
110static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
111{
112 void *ret;
113 int gfp = GFP_ATOMIC | GFP_DMA;
114
115 ret = (void *) __get_free_pages(gfp, get_order(size));
116
117 if (ret != NULL) {
118 memset(ret, 0, size);
119 *dma_handle = virt_to_bus(ret);
120 ret = (void *)KSEG0ADDR(ret);
121 }
122 return ret;
123}
124
125
126static void dma_free(void *vaddr, size_t size)
127{
128 vaddr = (void *)KSEG0ADDR(vaddr);
129 free_pages((unsigned long) vaddr, get_order(size));
130}
131
132
133static void
134setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
135{
136 int i;
137 for (i=0; i<NUM_IR_DESC; i++) {
138 aup->rx_ring[i] = (volatile ring_dest_t *)
139 (rx_base + sizeof(ring_dest_t)*i);
140 }
141 for (i=0; i<NUM_IR_DESC; i++) {
142 aup->tx_ring[i] = (volatile ring_dest_t *)
143 (tx_base + sizeof(ring_dest_t)*i);
144 }
145}
146
147static int au1k_irda_init(void)
148{
149 static unsigned version_printed = 0;
150 struct au1k_private *aup;
151 struct net_device *dev;
152 int err;
153
154 if (version_printed++ == 0) printk(version);
155
156 dev = alloc_irdadev(sizeof(struct au1k_private));
157 if (!dev)
158 return -ENOMEM;
159
160 dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
161 err = au1k_irda_net_init(dev);
162 if (err)
163 goto out;
164 err = register_netdev(dev);
165 if (err)
166 goto out1;
167 ir_devs[0] = dev;
168 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
169 return 0;
170
171out1:
172 aup = netdev_priv(dev);
173 dma_free((void *)aup->db[0].vaddr,
174 MAX_BUF_SIZE * 2*NUM_IR_DESC);
175 dma_free((void *)aup->rx_ring[0],
176 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
177 kfree(aup->rx_buff.head);
178out:
179 free_netdev(dev);
180 return err;
181}
182
183static int au1k_irda_init_iobuf(iobuff_t *io, int size)
184{
185 io->head = kmalloc(size, GFP_KERNEL);
186 if (io->head != NULL) {
187 io->truesize = size;
188 io->in_frame = FALSE;
189 io->state = OUTSIDE_FRAME;
190 io->data = io->head;
191 }
192 return io->head ? 0 : -ENOMEM;
193}
194
Alexander Beregalov602355a2009-04-15 12:52:40 +0000195static const struct net_device_ops au1k_irda_netdev_ops = {
196 .ndo_open = au1k_irda_start,
197 .ndo_stop = au1k_irda_stop,
198 .ndo_start_xmit = au1k_irda_hard_xmit,
199 .ndo_tx_timeout = au1k_tx_timeout,
200 .ndo_do_ioctl = au1k_irda_ioctl,
Alexander Beregalov602355a2009-04-15 12:52:40 +0000201};
202
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203static int au1k_irda_net_init(struct net_device *dev)
204{
205 struct au1k_private *aup = netdev_priv(dev);
206 int i, retval = 0, err;
207 db_dest_t *pDB, *pDBfree;
208 dma_addr_t temp;
209
210 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
211 if (err)
212 goto out1;
213
Alexander Beregalov602355a2009-04-15 12:52:40 +0000214 dev->netdev_ops = &au1k_irda_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215
216 irda_init_max_qos_capabilies(&aup->qos);
217
218 /* The only value we must override it the baudrate */
219 aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
220 IR_115200|IR_576000 |(IR_4000000 << 8);
221
222 aup->qos.min_turn_time.bits = qos_mtt_bits;
223 irda_qos_bits_to_value(&aup->qos);
224
225 retval = -ENOMEM;
226
227 /* Tx ring follows rx ring + 512 bytes */
228 /* we need a 1k aligned buffer */
229 aup->rx_ring[0] = (ring_dest_t *)
230 dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
231 if (!aup->rx_ring[0])
232 goto out2;
233
234 /* allocate the data buffers */
235 aup->db[0].vaddr =
236 (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
237 if (!aup->db[0].vaddr)
238 goto out3;
239
240 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
241
242 pDBfree = NULL;
243 pDB = aup->db;
244 for (i=0; i<(2*NUM_IR_DESC); i++) {
245 pDB->pnext = pDBfree;
246 pDBfree = pDB;
247 pDB->vaddr =
248 (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
249 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
250 pDB++;
251 }
252 aup->pDBfree = pDBfree;
253
254 /* attach a data buffer to each descriptor */
255 for (i=0; i<NUM_IR_DESC; i++) {
256 pDB = GetFreeDB(aup);
257 if (!pDB) goto out;
258 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
259 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
260 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
261 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
262 aup->rx_db_inuse[i] = pDB;
263 }
264 for (i=0; i<NUM_IR_DESC; i++) {
265 pDB = GetFreeDB(aup);
266 if (!pDB) goto out;
267 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
268 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
269 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
270 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
271 aup->tx_ring[i]->count_0 = 0;
272 aup->tx_ring[i]->count_1 = 0;
273 aup->tx_ring[i]->flags = 0;
274 aup->tx_db_inuse[i] = pDB;
275 }
276
Manuel Laussf59c8112011-11-10 12:06:22 +0000277#if defined(CONFIG_MIPS_DB1000)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 /* power on */
Manuel Lauss9bdcf332009-10-04 14:55:24 +0200279 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
280 BCSR_RESETS_IRDA_MODE_FULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281#endif
282
283 return 0;
284
285out3:
286 dma_free((void *)aup->rx_ring[0],
287 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
288out2:
289 kfree(aup->rx_buff.head);
290out1:
291 printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
292 return retval;
293}
294
295
/*
 * Program the IR controller for operation: bring it out of reset,
 * point it at the descriptor rings and select the initial speed.
 * Called from au1k_irda_start() before interrupts are requested.
 * The register write sequence and delays follow the hardware's
 * required ordering -- do not reorder.
 */
static int au1k_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int i;
	u32 control;
	u32 ring_address;

	/* bring the device out of reset */
	control = 0xe; /* coherent, clock enable, one half system clock */

#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= 1;	/* big-endian byte ordering */
#endif
	/* software ring indices restart at zero on (re)init */
	aup->tx_head = 0;
	aup->tx_tail = 0;
	aup->rx_head = 0;

	/* hand every rx descriptor to the hardware */
	for (i=0; i<NUM_IR_DESC; i++) {
		aup->rx_ring[i]->flags = AU_OWN;
	}

	writel(control, IR_INTERFACE_CONFIG);
	au_sync_delay(10);

	writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
	au_sync_delay(1);

	writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);

	/* the ring base is programmed as high/low physical address
	 * fragments (>>26 and bits 10..25) */
	ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
	writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
	writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);

	writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);

	writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
	writel(0, IR_RING_ADDR_CMPR);

	/* start at the slowest common IrDA rate */
	au1k_irda_set_speed(dev, 9600);
	return 0;
}
337
338static int au1k_irda_start(struct net_device *dev)
339{
340 int retval;
341 char hwname[32];
342 struct au1k_private *aup = netdev_priv(dev);
343
344 if ((retval = au1k_init(dev))) {
345 printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
346 return retval;
347 }
348
Joe Perchesa0607fd2009-11-18 23:29:17 -0800349 if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 0, dev->name, dev))) {
351 printk(KERN_ERR "%s: unable to get IRQ %d\n",
352 dev->name, dev->irq);
353 return retval;
354 }
Joe Perchesa0607fd2009-11-18 23:29:17 -0800355 if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 0, dev->name, dev))) {
357 free_irq(AU1000_IRDA_TX_INT, dev);
358 printk(KERN_ERR "%s: unable to get IRQ %d\n",
359 dev->name, dev->irq);
360 return retval;
361 }
362
363 /* Give self a hardware name */
364 sprintf(hwname, "Au1000 SIR/FIR");
365 aup->irlap = irlap_open(dev, &aup->qos, hwname);
366 netif_start_queue(dev);
367
368 writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
369
370 aup->timer.expires = RUN_AT((3*HZ));
371 aup->timer.data = (unsigned long)dev;
372 return 0;
373}
374
/*
 * ndo_stop: quiesce the controller (interrupts, config, clock), close
 * the IrLAP instance, stop the queue and release both interrupts.
 * Mirrors the setup performed in au1k_irda_start().
 */
static int au1k_irda_stop(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);

	/* disable interrupts */
	writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
	writel(0, IR_CONFIG_1);
	writel(0, IR_INTERFACE_CONFIG); /* disable clock */
	au_sync();

	/* tear down the IrLAP layer instance, if one was opened */
	if (aup->irlap) {
		irlap_close(aup->irlap);
		aup->irlap = NULL;
	}

	netif_stop_queue(dev);
	del_timer(&aup->timer);

	/* disable the interrupt */
	free_irq(AU1000_IRDA_TX_INT, dev);
	free_irq(AU1000_IRDA_RX_INT, dev);
	return 0;
}
398
399static void __exit au1k_irda_exit(void)
400{
401 struct net_device *dev = ir_devs[0];
402 struct au1k_private *aup = netdev_priv(dev);
403
404 unregister_netdev(dev);
405
406 dma_free((void *)aup->db[0].vaddr,
407 MAX_BUF_SIZE * 2*NUM_IR_DESC);
408 dma_free((void *)aup->rx_ring[0],
409 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
410 kfree(aup->rx_buff.head);
411 free_netdev(dev);
412}
413
414
415static inline void
416update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
417{
418 struct au1k_private *aup = netdev_priv(dev);
419 struct net_device_stats *ps = &aup->stats;
420
421 ps->tx_packets++;
422 ps->tx_bytes += pkt_len;
423
424 if (status & IR_TX_ERROR) {
425 ps->tx_errors++;
426 ps->tx_aborted_errors++;
427 }
428}
429
430
/*
 * Reap completed tx descriptors: advance tx_tail until we hit a
 * descriptor the hardware still owns (AU_OWN) or catch up with
 * tx_head.  Once the ring is fully drained, either apply a deferred
 * speed change requested by au1k_irda_hard_xmit(), or switch the
 * controller back from transmit to receive mode.
 */
static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile ring_dest_t *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				ptxd->count_1<<8 | ptxd->count_0);
		/* clear the byte count so the slot can be reused */
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		au_sync();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		/* a slot just opened -- let the stack queue again */
		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			/* deferred speed change now that tx is idle */
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		}
		else {
			/* tx done: disable TX, re-enable RX, kick ring */
			writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
				IR_CONFIG_1);
			au_sync();
			writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
				IR_CONFIG_1);
			writel(0, IR_RING_PROMPT);
			au_sync();
		}
	}
}
469
470
471/*
472 * Au1000 transmit routine.
473 */
/*
 * Au1000 transmit routine (ndo_start_xmit).
 *
 * A zero-length skb carrying a speed request applies the change
 * immediately if the tx ring is idle.  Otherwise the payload is
 * copied into the descriptor's pre-allocated DMA buffer -- raw for
 * FIR (4 Mbps), async-wrapped for SIR -- and the descriptor is handed
 * to the hardware.  Returns NETDEV_TX_BUSY and stops the queue when
 * the ring is full; au1k_tx_ack() restarts it.
 */
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);
	volatile ring_dest_t *ptxd;
	u32 len;

	u32 flags;
	db_dest_t *pDB;

	/* remember the requested speed; applied once the ring drains */
	if (speed != aup->speed && speed != -1) {
		aup->newspeed = speed;
	}

	if ((skb->len == 0) && (aup->newspeed)) {
		/* pure speed-change request, no payload */
		if (aup->tx_tail == aup->tx_head) {
			au1k_irda_set_speed(dev, speed);
			aup->newspeed = 0;
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	ptxd = aup->tx_ring[aup->tx_head];
	flags = ptxd->flags;

	/* ring full: hardware still owns the head descriptor ... */
	if (flags & AU_OWN) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	}
	/* ... or advancing head would collide with the tail */
	else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	}

	pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
	/* debug aid: warn when the receiver still holds bytes */
	if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
		printk("tx warning: rx byte cnt %x\n",
			read_ir_reg(IR_RX_BYTE_CNT));
	}
#endif

	if (aup->speed == 4000000) {
		/* FIR: raw copy; hardware performs framing */
		skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
		ptxd->count_0 = skb->len & 0xff;
		ptxd->count_1 = (skb->len >> 8) & 0xff;

	}
	else {
		/* SIR: software async-wrap the frame into the buffer */
		len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
		ptxd->count_0 = len & 0xff;
		ptxd->count_1 = (len >> 8) & 0xff;
		ptxd->flags |= IR_DIS_CRC;
		/* NOTE(review): magic register 0xae00000c, bit 13 cleared
		 * -- undocumented here; check the Au1000 data book before
		 * touching. */
		au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
	}
	/* hand the descriptor to the hardware and kick the ring */
	ptxd->flags |= AU_OWN;
	au_sync();

	writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
	writel(0, IR_RING_PROMPT);
	au_sync();

	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
	return NETDEV_TX_OK;
}
548
549
550static inline void
551update_rx_stats(struct net_device *dev, u32 status, u32 count)
552{
553 struct au1k_private *aup = netdev_priv(dev);
554 struct net_device_stats *ps = &aup->stats;
555
556 ps->rx_packets++;
557
558 if (status & IR_RX_ERROR) {
559 ps->rx_errors++;
560 if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
561 ps->rx_missed_errors++;
562 if (status & IR_MAX_LEN)
563 ps->rx_length_errors++;
564 if (status & IR_CRC_ERROR)
565 ps->rx_crc_errors++;
566 }
567 else
568 ps->rx_bytes += count;
569}
570
571/*
572 * Au1000 receive routine.
573 */
574static int au1k_irda_rx(struct net_device *dev)
575{
576 struct au1k_private *aup = netdev_priv(dev);
577 struct sk_buff *skb;
578 volatile ring_dest_t *prxd;
579 u32 flags, count;
580 db_dest_t *pDB;
581
582 prxd = aup->rx_ring[aup->rx_head];
583 flags = prxd->flags;
584
585 while (!(flags & AU_OWN)) {
586 pDB = aup->rx_db_inuse[aup->rx_head];
587 count = prxd->count_1<<8 | prxd->count_0;
588 if (!(flags & IR_RX_ERROR)) {
589 /* good frame */
590 update_rx_stats(dev, flags, count);
591 skb=alloc_skb(count+1,GFP_ATOMIC);
592 if (skb == NULL) {
Alexander Beregalov216c32d2009-01-08 16:42:08 -0800593 aup->netdev->stats.rx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 continue;
595 }
596 skb_reserve(skb, 1);
597 if (aup->speed == 4000000)
598 skb_put(skb, count);
599 else
600 skb_put(skb, count-2);
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -0300601 skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 skb->dev = dev;
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -0700603 skb_reset_mac_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 skb->protocol = htons(ETH_P_IRDA);
605 netif_rx(skb);
606 prxd->count_0 = 0;
607 prxd->count_1 = 0;
608 }
609 prxd->flags |= AU_OWN;
610 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
611 writel(0, IR_RING_PROMPT);
612 au_sync();
613
614 /* next descriptor */
615 prxd = aup->rx_ring[aup->rx_head];
616 flags = prxd->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
618 }
619 return 0;
620}
621
622
Jeff Garzike38c2c62007-10-29 05:18:12 -0400623static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624{
Jeff Garzike38c2c62007-10-29 05:18:12 -0400625 struct net_device *dev = dev_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626
627 writel(0, IR_INT_CLEAR); /* ack irda interrupts */
628
629 au1k_irda_rx(dev);
630 au1k_tx_ack(dev);
Jeff Garzike38c2c62007-10-29 05:18:12 -0400631
632 return IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633}
634
635
636/*
637 * The Tx ring has been full longer than the watchdog timeout
638 * value. The transmitter must be hung?
639 */
640static void au1k_tx_timeout(struct net_device *dev)
641{
642 u32 speed;
643 struct au1k_private *aup = netdev_priv(dev);
644
645 printk(KERN_ERR "%s: tx timeout\n", dev->name);
646 speed = aup->speed;
647 aup->speed = 0;
648 au1k_irda_set_speed(dev, speed);
649 aup->tx_full = 0;
650 netif_wake_queue(dev);
651}
652
653
654/*
655 * Set the IrDA communications speed.
656 */
657static int
658au1k_irda_set_speed(struct net_device *dev, int speed)
659{
660 unsigned long flags;
661 struct au1k_private *aup = netdev_priv(dev);
662 u32 control;
663 int ret = 0, timeout = 10, i;
664 volatile ring_dest_t *ptxd;
Manuel Laussf59c8112011-11-10 12:06:22 +0000665#if defined(CONFIG_MIPS_DB1000)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 unsigned long irda_resets;
667#endif
668
669 if (speed == aup->speed)
670 return ret;
671
672 spin_lock_irqsave(&ir_lock, flags);
673
674 /* disable PHY first */
675 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
676
677 /* disable RX/TX */
678 writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
679 IR_CONFIG_1);
680 au_sync_delay(1);
681 while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
682 mdelay(1);
683 if (!timeout--) {
684 printk(KERN_ERR "%s: rx/tx disable timeout\n",
685 dev->name);
686 break;
687 }
688 }
689
690 /* disable DMA */
691 writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
692 au_sync_delay(1);
693
694 /*
695 * After we disable tx/rx. the index pointers
696 * go back to zero.
697 */
698 aup->tx_head = aup->tx_tail = aup->rx_head = 0;
699 for (i=0; i<NUM_IR_DESC; i++) {
700 ptxd = aup->tx_ring[i];
701 ptxd->flags = 0;
702 ptxd->count_0 = 0;
703 ptxd->count_1 = 0;
704 }
705
706 for (i=0; i<NUM_IR_DESC; i++) {
707 ptxd = aup->rx_ring[i];
708 ptxd->count_0 = 0;
709 ptxd->count_1 = 0;
710 ptxd->flags = AU_OWN;
711 }
712
713 if (speed == 4000000) {
Manuel Laussf59c8112011-11-10 12:06:22 +0000714#if defined(CONFIG_MIPS_DB1000)
Manuel Lauss9bdcf332009-10-04 14:55:24 +0200715 bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716#else /* Pb1000 and Pb1100 */
717 writel(1<<13, CPLD_AUX1);
718#endif
719 }
720 else {
Manuel Laussf59c8112011-11-10 12:06:22 +0000721#if defined(CONFIG_MIPS_DB1000)
Manuel Lauss9bdcf332009-10-04 14:55:24 +0200722 bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723#else /* Pb1000 and Pb1100 */
724 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
725#endif
726 }
727
728 switch (speed) {
729 case 9600:
730 writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
731 writel(IR_SIR_MODE, IR_CONFIG_1);
732 break;
733 case 19200:
734 writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
735 writel(IR_SIR_MODE, IR_CONFIG_1);
736 break;
737 case 38400:
738 writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
739 writel(IR_SIR_MODE, IR_CONFIG_1);
740 break;
741 case 57600:
742 writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
743 writel(IR_SIR_MODE, IR_CONFIG_1);
744 break;
745 case 115200:
746 writel(12<<5, IR_WRITE_PHY_CONFIG);
747 writel(IR_SIR_MODE, IR_CONFIG_1);
748 break;
749 case 4000000:
750 writel(0xF, IR_WRITE_PHY_CONFIG);
751 writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
752 break;
753 default:
754 printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
755 ret = -EINVAL;
756 break;
757 }
758
759 aup->speed = speed;
760 writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
761 au_sync();
762
763 control = read_ir_reg(IR_ENABLE);
764 writel(0, IR_RING_PROMPT);
765 au_sync();
766
767 if (control & (1<<14)) {
768 printk(KERN_ERR "%s: configuration error\n", dev->name);
769 }
770 else {
771 if (control & (1<<11))
772 printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
773 if (control & (1<<12))
774 printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
775 if (control & (1<<13))
776 printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
777 if (control & (1<<10))
778 printk(KERN_DEBUG "%s TX enabled\n", dev->name);
779 if (control & (1<<9))
780 printk(KERN_DEBUG "%s RX enabled\n", dev->name);
781 }
782
783 spin_unlock_irqrestore(&ir_lock, flags);
784 return ret;
785}
786
787static int
788au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
789{
790 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
791 struct au1k_private *aup = netdev_priv(dev);
792 int ret = -EOPNOTSUPP;
793
794 switch (cmd) {
795 case SIOCSBANDWIDTH:
796 if (capable(CAP_NET_ADMIN)) {
797 /*
798 * We are unable to set the speed if the
799 * device is not running.
800 */
801 if (aup->open)
802 ret = au1k_irda_set_speed(dev,
803 rq->ifr_baudrate);
804 else {
805 printk(KERN_ERR "%s ioctl: !netif_running\n",
806 dev->name);
807 ret = 0;
808 }
809 }
810 break;
811
812 case SIOCSMEDIABUSY:
813 ret = -EPERM;
814 if (capable(CAP_NET_ADMIN)) {
815 irda_device_set_media_busy(dev, TRUE);
816 ret = 0;
817 }
818 break;
819
820 case SIOCGRECEIVING:
821 rq->ifr_receiving = 0;
822 break;
823 default:
824 break;
825 }
826 return ret;
827}
828
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
830MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
831
832module_init(au1k_irda_init);
833module_exit(au1k_irda_exit);