/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"

#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <asm/semaphore.h>


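/* Ring slot bookkeeping. The descriptor ring is used as a circular buffer,
 * so next_slot()/prev_slot() wrap around at the ring boundaries.
 */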
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}

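/* DMA mapping helpers. TX buffers are mapped towards the device
 * (DMA_TO_DEVICE), RX buffers are mapped from the device (DMA_FROM_DEVICE).
 */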
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

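/* The first byte of skb->cb is used as a flag telling the free path whether
 * the skb must be freed here (RX buffers) or is freed elsewhere as part of
 * the ieee80211_txb (TX fragments).
 */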
static inline
void mark_skb_mustfree(struct sk_buff *skb,
		       char mustfree)
{
	skb->cb[0] = mustfree;
}

static inline
int skb_mustfree(struct sk_buff *skb)
{
	return (skb->cb[0] != 0);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc *desc,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (skb_mustfree(meta->skb)) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
	}
	meta->skb = NULL;
	if (meta->txb) {
		ieee80211_txb_free(meta->txb);
		meta->txb = NULL;
	}
}

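/* Allocate the coherent descriptor ring memory. The ring must be aligned
 * (see the assert below) and, judging by the error message, its bus address
 * must stay below BCM43xx_DMA_BUSADDRMAX (the 1G limit).
 */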
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					 &(ring->dmabase), GFP_KERNEL);
	if (!ring->vbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
				    "(0x%08x, len: %lu)\n",
		       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
				  ring->vbase, ring->dmabase);
		return -ENOMEM;
	}
	assert(!(ring->dmabase & 0x000003FF));
	memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->vbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_RX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_RX_STATUS);
		value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
		    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
		    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
			break;
		udelay(10);
	}
	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_TX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* Ensure the reset is completed. */
	udelay(300);

	return 0;
}

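/* Allocate and DMA-map an skb for one RX slot and point the descriptor at it.
 * The RX header at the start of the buffer is zeroed, so the RX path can
 * later poll rxhdr->frame_length to see whether the device wrote a frame.
 */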
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	dma_addr_t dmaaddr;
	u32 desc_addr;
	u32 desc_ctl;
	const int slot = (int)(desc - ring->vbase);
	struct sk_buff *skb;

	assert(slot >= 0 && slot < ring->nr_slots);
	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb_any(skb);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
				    "(0x%08x, len: %u)\n",
		       dmaaddr, ring->rx_buffersize);
		return -ENOMEM;
	}
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;
	mark_skb_mustfree(skb, 1);
	desc_addr = (u32)(dmaaddr + ring->memoffset);
	desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
		    (u32)(ring->rx_buffersize - ring->frameoffset));
	if (slot == ring->nr_slots - 1)
		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
	set_desc_addr(desc, desc_addr);
	set_desc_ctl(desc, desc_ctl);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;

	if (ring->tx) {
		/* Set Transmit Control register to "transmit enable" */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_CONTROL,
				BCM43xx_DMA_TXCTRL_ENABLE);
		/* Set Transmit Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
				ring->dmabase + ring->memoffset);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		/* Set Receive Control "receive enable" and frame offset */
		value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
		value |= BCM43xx_DMA_RXCTRL_ENABLE;
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_CONTROL,
				value);
		/* Set Receive Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
				ring->dmabase + ring->memoffset);
		/* Init the descriptor pointer. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
				200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Transmit Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
				0x00000000);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Receive Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
				0x00000000);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		if (!meta->skb) {
			assert(ring->tx);
			assert(!meta->txb);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, desc, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       u16 dma_controller_base,
					       int nr_descriptor_slots,
					       int tx)
{
	struct bcm43xx_dmaring *ring;
	int err;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
#ifdef CONFIG_BCM947XX
	if (bcm->pci_dev->bus->number == 0)
		ring->memoffset = 0;
#endif

	ring->bcm = bcm;
	ring->nr_slots = nr_descriptor_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
	ring->mmio_base = dma_controller_base;
	if (tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		switch (dma_controller_base) {
		case BCM43xx_MMIO_DMA1_BASE:
			ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
			break;
		case BCM43xx_MMIO_DMA4_BASE:
			ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
			break;
		default:
			assert(0);
		}
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm->current_core->dma;

	bcm43xx_destroy_dmaring(dma->rx_ring1);
	dma->rx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

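/* Set up all DMA rings: tx_ring0..tx_ring3 on the DMA1..DMA4 controllers,
 * rx_ring0 on DMA1 and, for core revisions below 5, rx_ring1 on DMA4
 * (the latter carries xmit status reports rather than received frames).
 */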
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm->current_core->dma;
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
				     BCM43xx_RXRING_SLOTS, 0);
	if (!ring)
		goto err_destroy_tx3;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
					     BCM43xx_RXRING_SLOTS, 0);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring1 = ring;
	}

	dprintk(KERN_INFO PFX "DMA initialized\n");
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

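/* Cookie layout example: a frame queued on tx_ring2 (DMA3, upper nibble 0x2)
 * in slot 5 gets the cookie 0x2005; parse_cookie() below reverses this.
 */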
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x0000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits
	 */
	switch (ring->mmio_base) {
	default:
		assert(0);
	case BCM43xx_MMIO_DMA1_BASE:
		break;
	case BCM43xx_MMIO_DMA2_BASE:
		cookie = 0x1000;
		break;
	case BCM43xx_MMIO_DMA3_BASE:
		cookie = 0x2000;
		break;
	case BCM43xx_MMIO_DMA4_BASE:
		cookie = 0x3000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm->current_core->dma;
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x0000:
		ring = dma->tx_ring0;
		break;
	case 0x1000:
		ring = dma->tx_ring1;
		break;
	case 0x2000:
		ring = dma->tx_ring2;
		break;
	case 0x3000:
		ring = dma->tx_ring3;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	bcm43xx_write32(ring->bcm,
			ring->mmio_base + BCM43xx_DMA_TX_DESC_INDEX,
			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
}

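/* Map one fragment for TX: reserve a slot, prepend the device TX header
 * (which includes the PLCP header), DMA-map the buffer, fill in the
 * descriptor and poke the controller to start the transfer.
 */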
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_txb *txb,
			   u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	u32 desc_ctl;
	u32 desc_addr;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = ring->vbase + slot;
	meta = ring->meta + slot;

	if (cur_frag == 0) {
		/* Save the txb pointer for freeing in xmitstatus IRQ */
		meta->txb = txb;
	}

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	meta->skb = skb;
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
		return_slot(ring, slot);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
				    "(0x%08x, len: %u)\n",
		       meta->dmaaddr, skb->len);
		return -ENOMEM;
	}

	desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
	desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
	desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
	desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
		     (u32)(meta->skb->len - ring->frameoffset));
	if (slot == ring->nr_slots - 1)
		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;

	set_desc_ctl(desc, desc_ctl);
	set_desc_addr(desc, desc_addr);
	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);

	return 0;
}

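/* TX entry point. Data frames are queued on tx_ring1 (the DMA2 controller);
 * each fragment of the txb gets its own descriptor slot.
 */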
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* We do not free the skb, as it is freed as
		 * part of the txb freeing.
		 */
		mark_skb_mustfree(skb, 0);
		dma_tx_fragment(ring, skb, txb, i);
		//TODO: handle failure of dma_tx_fragment
	}

	return 0;
}

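/* Process a TX status report: look up ring and slot from the cookie and
 * release every slot of the frame, from its FRAMESTART descriptor up to and
 * including the FRAMEEND descriptor.
 */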
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = ring->vbase + slot;
		meta = ring->meta + slot;

		is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, desc, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}

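/* Handle one filled RX slot. On the DMA4 RX ring the buffer holds a hardware
 * xmit status report; otherwise it holds a received frame, which is passed
 * to bcm43xx_rx() after a fresh buffer has been attached to the slot.
 */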
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ring->vbase + *slot;
	meta = ring->meta + *slot;

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;

		stat.cookie = le16_to_cpu(hw->cookie);
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ring->vbase + *slot;
			meta = ring->meta + *slot;
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}

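/* Service an RX ring: the hardware descriptor index in the RX status register
 * tells how far the device has filled the ring. Process every slot from our
 * current position up to that index and write the new index back.
 */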
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	bcm43xx_write32(ring->bcm,
			ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
	ring->current_slot = slot;
}

/* vim: set ts=8 sw=8 sts=8: */