/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"

#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <asm/semaphore.h>


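/* Slot accounting helpers: a ring consists of nr_slots descriptors, of which
 * used_slots are currently in use.  next_slot() and prev_slot() wrap around
 * the ring boundaries.
 */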
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
                                         buf, len,
                                         DMA_TO_DEVICE);
        } else {
                dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
                                         buf, len,
                                         DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                dma_unmap_single(&ring->bcm->pci_dev->dev,
                                 addr, len,
                                 DMA_TO_DEVICE);
        } else {
                dma_unmap_single(&ring->bcm->pci_dev->dev,
                                 addr, len,
                                 DMA_FROM_DEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
                                addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
                                   addr, len, DMA_FROM_DEVICE);
}

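/* A flag in skb->cb[0] tells the DMA code whether it owns the skb and must
 * free it itself (RX buffers), or whether the skb is freed as part of the
 * ieee80211_txb that carries it (TX fragments).
 */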
static inline
void mark_skb_mustfree(struct sk_buff *skb,
                       char mustfree)
{
        skb->cb[0] = mustfree;
}

static inline
int skb_mustfree(struct sk_buff *skb)
{
        return (skb->cb[0] != 0);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc *desc,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (skb_mustfree(meta->skb)) {
                if (irq_context)
                        dev_kfree_skb_irq(meta->skb);
                else
                        dev_kfree_skb(meta->skb);
        }
        meta->skb = NULL;
        if (meta->txb) {
                ieee80211_txb_free(meta->txb);
                meta->txb = NULL;
        }
}

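/* The descriptor ring itself lives in coherent DMA memory.  The hardware can
 * only address the low 1GB (see BCM43xx_DMA_BUSADDRMAX) and expects the ring
 * base to be 1024-byte aligned, hence the extra checks below.
 */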
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                                         &(ring->dmabase), GFP_KERNEL);
        if (!ring->vbase) {
                printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G\n");
                dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                                  ring->vbase, ring->dmabase);
                return -ENOMEM;
        }
        assert(!(ring->dmabase & 0x000003FF));
        memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->vbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base)
{
        int i;
        u32 value;

        bcm43xx_write32(bcm,
                        mmio_base + BCM43xx_DMA_RX_CONTROL,
                        0x00000000);
        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_RX_STATUS);
                value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
                        i = -1;
                        break;
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

static inline int dmacontroller_rx_reset(struct bcm43xx_dmaring *ring)
{
        assert(!ring->tx);

        return bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base)
{
        int i;
        u32 value;

        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_TX_STATUS);
                value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
                    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
                    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
                        break;
                udelay(10);
        }
        bcm43xx_write32(bcm,
                        mmio_base + BCM43xx_DMA_TX_CONTROL,
                        0x00000000);
        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_TX_STATUS);
                value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
                        i = -1;
                        break;
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

static inline int dmacontroller_tx_reset(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);

        return bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
}

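/* Allocate a receive skb for one RX slot, DMA map it and point the
 * descriptor at the mapping.
 */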
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        dma_addr_t dmaaddr;
        u32 desc_addr;
        u32 desc_ctl;
        const int slot = (int)(desc - ring->vbase);
        struct sk_buff *skb;

        assert(slot >= 0 && slot < ring->nr_slots);
        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
                unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb_any(skb);
                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G\n");
                return -ENOMEM;
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;
        mark_skb_mustfree(skb, 1);
        desc_addr = (u32)(dmaaddr + ring->memoffset);
        desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
                    (u32)(ring->rx_buffersize - ring->frameoffset));
        if (slot == ring->nr_slots - 1)
                desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
        set_desc_addr(desc, desc_addr);
        set_desc_ctl(desc, desc_ctl);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc *desc = NULL;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;

                assert(ring->used_slots <= ring->nr_slots);
        }
        ring->used_slots = ring->nr_slots;

        err = 0;
out:
        return err;

err_unwind:
        /* The buffer for slot i was not set up, so only
         * unwind the slots before it.
         */
        for (i--; i >= 0; i--) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        ring->used_slots = 0;
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;

        if (ring->tx) {
                /* Set Transmit Control register to "transmit enable" */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_TX_CONTROL,
                                BCM43xx_DMA_TXCTRL_ENABLE);
                /* Set Transmit Descriptor ring address. */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
                                ring->dmabase + ring->memoffset);
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                /* Set Receive Control "receive enable" and frame offset */
                value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
                value |= BCM43xx_DMA_RXCTRL_ENABLE;
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_RX_CONTROL,
                                value);
                /* Set Receive Descriptor ring address. */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
                                ring->dmabase + ring->memoffset);
                /* Init the descriptor pointer. */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
                                200);
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                dmacontroller_tx_reset(ring);
                /* Zero out Transmit Descriptor ring address. */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
                                0x00000000);
        } else {
                dmacontroller_rx_reset(ring);
                /* Zero out Receive Descriptor ring address. */
                bcm43xx_write32(ring->bcm,
                                ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
                                0x00000000);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                if (!meta->skb) {
                        assert(ring->tx);
                        assert(!meta->txb);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, desc, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               u16 dma_controller_base,
                                               int nr_descriptor_slots,
                                               int tx)
{
        struct bcm43xx_dmaring *ring;
        int err;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->memoffset = 0;
#endif

        spin_lock_init(&ring->lock);
        ring->bcm = bcm;
        ring->nr_slots = nr_descriptor_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = dma_controller_base;
        if (tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                switch (dma_controller_base) {
                case BCM43xx_MMIO_DMA1_BASE:
                        ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
                        break;
                case BCM43xx_MMIO_DMA4_BASE:
                        ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
                        break;
                default:
                        assert(0);
                }
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

out:
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring1);
        bcm->current_core->dma->rx_ring1 = NULL;
        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
        bcm->current_core->dma->rx_ring0 = NULL;
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
        bcm->current_core->dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
        bcm->current_core->dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
        bcm->current_core->dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
        bcm->current_core->dma->tx_ring0 = NULL;
}

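/* Set up the four TX rings (DMA1-DMA4) and the RX ring(s).  rx_ring0 (DMA1)
 * receives frames; rx_ring1 (DMA4) carries xmit status reports and is only
 * used on core revisions below 5.
 */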
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto out;
        bcm->current_core->dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx0;
        bcm->current_core->dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx1;
        bcm->current_core->dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx2;
        bcm->current_core->dma->tx_ring3 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
                                     BCM43xx_RXRING_SLOTS, 0);
        if (!ring)
                goto err_destroy_tx3;
        bcm->current_core->dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                             BCM43xx_RXRING_SLOTS, 0);
                if (!ring)
                        goto err_destroy_rx0;
                bcm->current_core->dma->rx_ring1 = ring;
        }

        dprintk(KERN_INFO PFX "DMA initialized\n");
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
        bcm->current_core->dma->rx_ring0 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
        bcm->current_core->dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
        bcm->current_core->dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
        bcm->current_core->dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
        bcm->current_core->dma->tx_ring0 = NULL;
        goto out;
}

/* Generate a cookie for the TX header. */
static inline
u16 generate_cookie(struct bcm43xx_dmaring *ring,
                    int slot)
{
        u16 cookie = 0x0000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         */
        switch (ring->mmio_base) {
        default:
                assert(0);
        case BCM43xx_MMIO_DMA1_BASE:
                break;
        case BCM43xx_MMIO_DMA2_BASE:
                cookie = 0x1000;
                break;
        case BCM43xx_MMIO_DMA3_BASE:
                cookie = 0x2000;
                break;
        case BCM43xx_MMIO_DMA4_BASE:
                cookie = 0x3000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static inline
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0x0000:
                ring = bcm->current_core->dma->tx_ring0;
                break;
        case 0x1000:
                ring = bcm->current_core->dma->tx_ring1;
                break;
        case 0x2000:
                ring = bcm->current_core->dma->tx_ring2;
                break;
        case 0x3000:
                ring = bcm->current_core->dma->tx_ring3;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                         int slot)
{
        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        bcm43xx_write32(ring->bcm,
                        ring->mmio_base + BCM43xx_DMA_TX_DESC_INDEX,
                        (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
}

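/* Map one 802.11 fragment, prepend the device specific TX header (which
 * includes the PLCP header), fill in a TX descriptor and kick the DMA engine.
 */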
static inline
int dma_tx_fragment(struct bcm43xx_dmaring *ring,
                    struct sk_buff *skb,
                    struct ieee80211_txb *txb,
                    u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        u32 desc_ctl;
        u32 desc_addr;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = ring->vbase + slot;
        meta = ring->meta + slot;

        if (cur_frag == 0) {
                /* Save the txb pointer for freeing in xmitstatus IRQ */
                meta->txb = txb;
        }

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));

        meta->skb = skb;
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
                return_slot(ring, slot);
                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G\n");
                return -ENOMEM;
        }

        desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
        desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
        desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
        desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
                     (u32)(meta->skb->len - ring->frameoffset));
        if (slot == ring->nr_slots - 1)
                desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;

        set_desc_ctl(desc, desc_ctl);
        set_desc_addr(desc, desc_addr);
        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);

        return 0;
}

static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
                                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        assert(irqs_disabled());
        spin_lock(&ring->lock);
        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* We do not free the skb, as it is freed as
                 * part of the txb freeing.
                 */
                mark_skb_mustfree(skb, 0);
                dma_tx_fragment(ring, skb, txb, i);
                /* TODO: handle failure of dma_tx_fragment */
        }
        spin_unlock(&ring->lock);

        return 0;
}

int fastcall
bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
                         struct ieee80211_txb *txb)
{
        return dma_transfer_txb(bcm->current_core->dma->tx_ring1,
                                txb);
}

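/* Handle a transmit status report: walk all slots that belong to the frame
 * identified by the status cookie, unmap and free their buffers and return
 * the slots to the ring.
 */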
void fastcall
bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                              struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        assert(irqs_disabled());
        spin_lock(&ring->lock);

        assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = ring->vbase + slot;
                meta = ring->meta + slot;

                is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, desc, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;

        spin_unlock(&ring->lock);
}

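/* Process the buffer in one RX slot: either a received frame that is handed
 * to the 802.11 stack, or (on the DMA4 ring) an xmit status report.
 */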
static inline
void dma_rx(struct bcm43xx_dmaring *ring,
            int *slot)
{
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ring->vbase + *slot;
        meta = ring->meta + *slot;

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;

                stat.cookie = le16_to_cpu(hw->cookie);
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (len == 0)
                        goto drop;
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 1;
                s32 tmp = len - ring->rx_buffersize;

                for ( ; tmp > 0; tmp -= ring->rx_buffersize) {
                        *slot = next_slot(ring, *slot);
                        cnt++;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small. %d dropped.\n",
                        cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

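/* RX interrupt handler: process every slot between our last position and the
 * hardware's current descriptor pointer, then tell the hardware how far we
 * got by writing the RX descriptor index.
 */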
void fastcall
bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        assert(irqs_disabled());
        spin_lock(&ring->lock);

        status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
        descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
        current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        bcm43xx_write32(ring->bcm,
                        ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
                        (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
        ring->current_slot = slot;

        spin_unlock(&ring->lock);
}

/* vim: set ts=8 sw=8 sts=8: */