/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc32 *descbase = ring->descbase;
        int slot;
        u32 ctl;
        u32 addr;
        u32 addrext;

        slot = (int)(&(desc->dma32) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ssb_dma_translation(ring->dev->dev);
        ctl = (bufsize - ring->frameoffset)
            & B43_DMA32_DCTL_BYTECNT;
        if (slot == ring->nr_slots - 1)
                ctl |= B43_DMA32_DCTL_DTABLEEND;
        if (start)
                ctl |= B43_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
            & B43_DMA32_DCTL_ADDREXT_MASK;

        desc->dma32.control = cpu_to_le32(ctl);
        desc->dma32.address = cpu_to_le32(addr);
}
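
/*
 * Worked example for the address split above (illustrative, assuming the
 * usual SSB constants SSB_DMA_TRANSLATION_MASK == 0xC0000000 and
 * SSB_DMA_TRANSLATION_SHIFT == 30): for dmaaddr == 0x8F001000,
 *
 *   addr    = 0x8F001000 & ~0xC0000000        == 0x0F001000
 *   addrext = (0x8F001000 & 0xC0000000) >> 30 == 0x2
 *
 * The two addrext bits travel in the descriptor control word, while the
 * SSB routing value from ssb_dma_translation() is OR'ed back into the
 * address field so the core emits the address through the backplane's
 * translation window.
 */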

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
        val &= B43_DMA32_RXDPTR;

        return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
        .idx2desc = op32_idx2desc,
        .fill_descriptor = op32_fill_descriptor,
        .poke_tx = op32_poke_tx,
        .tx_suspend = op32_tx_suspend,
        .tx_resume = op32_tx_resume,
        .get_current_rxslot = op32_get_current_rxslot,
        .set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc64 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc64 *descbase = ring->descbase;
        int slot;
        u32 ctl0 = 0, ctl1 = 0;
        u32 addrlo, addrhi;
        u32 addrext;

        slot = (int)(&(desc->dma64) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
        addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
        if (slot == ring->nr_slots - 1)
                ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
        if (start)
                ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
        if (end)
                ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
        if (irq)
                ctl0 |= B43_DMA64_DCTL0_IRQ;
        ctl1 |= (bufsize - ring->frameoffset)
            & B43_DMA64_DCTL1_BYTECNT;
        ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
            & B43_DMA64_DCTL1_ADDREXT_MASK;

        desc->dma64.control0 = cpu_to_le32(ctl0);
        desc->dma64.control1 = cpu_to_le32(ctl1);
        desc->dma64.address_low = cpu_to_le32(addrlo);
        desc->dma64.address_high = cpu_to_le32(addrhi);
}
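
/*
 * Note on the "<< 1" above: in the 64-bit descriptor layout the SSB
 * routing bits sit one bit position higher in address_high than the raw
 * ssb_dma_translation() value, so the value is shifted left by one
 * before being merged in. As an illustrative example, a routing value
 * of 0x40000000 (a typical PCI translation, assumed here) becomes
 * 0x80000000, which lands in the bits just cleared by
 * ~SSB_DMA_TRANSLATION_MASK.
 */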

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
        val &= B43_DMA64_RXSTATDPTR;

        return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
        .idx2desc = op64_idx2desc,
        .fill_descriptor = op64_fill_descriptor,
        .poke_tx = op64_poke_tx,
        .tx_suspend = op64_tx_suspend,
        .tx_resume = op64_tx_resume,
        .get_current_rxslot = op64_get_current_rxslot,
        .set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}
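
/*
 * Illustrative ring arithmetic (with a hypothetical nr_slots of 256):
 * next_slot(ring, 255) wraps to 0 and prev_slot(ring, 0) wraps to 255.
 * A TX ring starts out with current_slot == -1, so the first
 * request_slot() call below hands out slot 0.
 */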

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
        if (current_used_slots <= ring->max_used_slots)
                return;
        ring->max_used_slots = current_used_slots;
        if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
                b43dbg(ring->dev->wl,
                       "max_used_slots increased to %d on %s ring %d\n",
                       ring->max_used_slots,
                       ring->tx ? "TX" : "RX", ring->index);
        }
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
        int slot;

        B43_WARN_ON(!ring->tx);
        B43_WARN_ON(ring->stopped);
        B43_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);

        return slot;
}

/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
                                              int queue_priority)
{
        struct b43_dmaring *ring;

/* FIXME: For now we always run on TX-ring-1 */
        return dev->dma.tx_ring1;

        /* 0 = highest priority */
        switch (queue_priority) {
        default:
                B43_WARN_ON(1);
                /* fallthrough */
        case 0:
                ring = dev->dma.tx_ring3;
                break;
        case 1:
                ring = dev->dma.tx_ring2;
                break;
        case 2:
                ring = dev->dma.tx_ring1;
                break;
        case 3:
                ring = dev->dma.tx_ring0;
                break;
        case 4:
                ring = dev->dma.tx_ring4;
                break;
        case 5:
                ring = dev->dma.tx_ring5;
                break;
        }

        return ring;
}

/* Bcm43xx-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
        static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, };

/* FIXME: have only one queue, for now */
        return 0;

        return idx_to_prio[ring->index];
}

u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                B43_MMIO_DMA64_BASE0,
                B43_MMIO_DMA64_BASE1,
                B43_MMIO_DMA64_BASE2,
                B43_MMIO_DMA64_BASE3,
                B43_MMIO_DMA64_BASE4,
                B43_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                B43_MMIO_DMA32_BASE0,
                B43_MMIO_DMA32_BASE1,
                B43_MMIO_DMA32_BASE2,
                B43_MMIO_DMA32_BASE3,
                B43_MMIO_DMA32_BASE4,
                B43_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                B43_WARN_ON(!(controller_idx >= 0 &&
                              controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
        }
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map32)));
        return map32[controller_idx];
}
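
/*
 * Usage sketch (illustrative): each DMA engine owns a small block of
 * MMIO registers starting at the base returned here, e.g.
 *
 *   ring->mmio_base = b43_dmacontroller_base(1, 0);   (64-bit engine #0)
 *
 * The b43_dma_read()/b43_dma_write() helpers used throughout this file
 * are assumed to add per-engine register offsets such as B43_DMA64_TXCTL
 * to that base.
 */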

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                              unsigned char *buf, size_t len, int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = dma_map_single(ring->dev->dev->dev,
                                         buf, len, DMA_TO_DEVICE);
        } else {
                dmaaddr = dma_map_single(ring->dev->dev->dev,
                                         buf, len, DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
                          dma_addr_t addr, size_t len, int tx)
{
        if (tx) {
                dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
        } else {
                dma_unmap_single(ring->dev->dev->dev,
                                 addr, len, DMA_FROM_DEVICE);
        }
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                                 dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        dma_sync_single_for_cpu(ring->dev->dev->dev,
                                addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                    dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        dma_sync_single_for_device(ring->dev->dev->dev,
                                   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
                                struct b43_dmadesc_meta *meta)
{
        if (meta->skb) {
                dev_kfree_skb_any(meta->skb);
                meta->skb = NULL;
        }
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
        struct device *dev = ring->dev->dev->dev;
        gfp_t flags = GFP_KERNEL;

        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
         * alignment and 8K buffers for 64-bit DMA with 8K alignment.
         * Testing has shown that 4K is sufficient for the latter as long
         * as the buffer does not cross an 8K boundary.
         *
         * For unknown reasons - possibly a hardware error - the BCM4311
         * rev 02, which uses 64-bit DMA, needs the ring buffer in very
         * low memory, which accounts for the GFP_DMA flag below.
         */
        if (ring->dma64)
                flags |= GFP_DMA;
        ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
                                            &(ring->dmabase), flags);
        if (!ring->descbase) {
                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
        struct device *dev = ring->dev->dev->dev;

        dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (dma64) {
                        value &= B43_DMA64_RXSTAT;
                        if (value == B43_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_RXSTATE;
                        if (value == B43_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA RX reset timed out\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        for (i = 0; i < 10; i++) {
                offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (dma64) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED ||
                            value == B43_DMA64_TXSTAT_IDLEWAIT ||
                            value == B43_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED ||
                            value == B43_DMA32_TXSTAT_IDLEWAIT ||
                            value == B43_DMA32_TXSTAT_STOPPED)
                                break;
                }
                msleep(1);
        }
        offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (dma64) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA TX reset timed out\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        msleep(1);

        return 0;
}
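
/*
 * Sequencing note: the TX reset above is a three-step handshake. First
 * wait up to ~10ms for the engine to settle into DISABLED, IDLEWAIT or
 * STOPPED, then write 0 to TXCTL to disable it, then wait up to ~10ms
 * more until the status reads back DISABLED. The "i = -1" assignment
 * distinguishes "condition met" from "loop ran out" after each loop.
 */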

static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
        struct b43_rxhdr_fw4 *rxhdr;
        struct b43_hwtxstatus *txstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        B43_WARN_ON(ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (dma_mapping_error(dmaaddr)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;

                dev_kfree_skb_any(skb);

                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
                if (unlikely(!skb))
                        return -ENOMEM;
                dmaaddr = map_descbuffer(ring, skb->data,
                                         ring->rx_buffersize, 0);
        }

        if (dma_mapping_error(dmaaddr)) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
        rxhdr->frame_len = 0;
        txstat = (struct b43_hwtxstatus *)(skb->data);
        txstat->cookie = 0;

        return 0;
}
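
/*
 * Why the two zero-stores above matter: dma_rx() below busy-waits on
 * rxhdr->frame_len (and, on the TX-status ring, on hwtxstatus->cookie)
 * becoming non-zero to detect that the device has finished writing the
 * buffer. Zeroing those fields here re-arms that poll for the buffer's
 * next use.
 */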

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err) {
                        b43err(ring->dev->wl,
                               "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
                }
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
      out:
        return err;

      err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;
        u32 trans = ssb_dma_translation(ring->dev->dev);

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA64_TXENABLE;
                        value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                            & B43_DMA64_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_TXCTL, value);
                        b43_dma_write(ring, B43_DMA64_TXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_TXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA32_TXENABLE;
                        value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                            & B43_DMA32_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_TXCTL, value);
                        b43_dma_write(ring, B43_DMA32_TXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
                        value |= B43_DMA64_RXENABLE;
                        value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                            & B43_DMA64_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_RXCTL, value);
                        b43_dma_write(ring, B43_DMA64_RXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_RXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                        b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc64));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
                        value |= B43_DMA32_RXENABLE;
                        value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                            & B43_DMA32_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_RXCTL, value);
                        b43_dma_write(ring, B43_DMA32_RXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                        b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc32));
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
        if (ring->tx) {
                b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                           ring->dma64);
                if (ring->dma64) {
                        b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_TXRING, 0);
        } else {
                b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                           ring->dma64);
                if (ring->dma64) {
                        b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        B43_WARN_ON(!ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta);
        }
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
        u32 tmp;
        u16 mmio_base;

        tmp = b43_read32(dev, SSB_TMSHIGH);
        if (tmp & SSB_TMSHIGH_DMA64)
                return DMA_64BIT_MASK;
        mmio_base = b43_dmacontroller_base(0, 0);
        b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
        tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
        if (tmp & B43_DMA32_TXADDREXT_MASK)
                return DMA_32BIT_MASK;

        return DMA_30BIT_MASK;
}
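
/*
 * Probe logic above: the supported mask is detected, not assumed. If
 * TMSHIGH advertises a 64-bit engine, use a 64-bit mask. Otherwise
 * write the ADDREXT bits into the 32-bit TXCTL register and read them
 * back: if the bits stick, the core can address a full 32 bits (30
 * native address bits plus the 2 ADDREXT bits); if they read back as
 * zero, the core is limited to 30-bit addressing.
 */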

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
                                      int for_tx, int dma64)
{
        struct b43_dmaring *ring;
        int err;
        int nr_slots;
        dma_addr_t dma_test;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = B43_RXRING_SLOTS;
        if (for_tx)
                nr_slots = B43_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;
        if (for_tx) {
                ring->txhdr_cache = kcalloc(nr_slots,
                                            sizeof(struct b43_txhdr_fw4),
                                            GFP_KERNEL);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;

                /* test for ability to dma to txhdr_cache */
                dma_test = dma_map_single(dev->dev->dev,
                                          ring->txhdr_cache,
                                          sizeof(struct b43_txhdr_fw4),
                                          DMA_TO_DEVICE);

                if (dma_mapping_error(dma_test)) {
                        /* ugh realloc */
                        kfree(ring->txhdr_cache);
                        ring->txhdr_cache = kcalloc(nr_slots,
                                                    sizeof(struct b43_txhdr_fw4),
                                                    GFP_KERNEL | GFP_DMA);
                        if (!ring->txhdr_cache)
                                goto err_kfree_meta;

                        dma_test = dma_map_single(dev->dev->dev,
                                                  ring->txhdr_cache,
                                                  sizeof(struct b43_txhdr_fw4),
                                                  DMA_TO_DEVICE);

                        if (dma_mapping_error(dma_test))
                                goto err_kfree_txhdr_cache;
                }

                dma_unmap_single(dev->dev->dev,
                                 dma_test, sizeof(struct b43_txhdr_fw4),
                                 DMA_TO_DEVICE);
        }

        ring->dev = dev;
        ring->nr_slots = nr_slots;
        ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (dma64)
                ring->ops = &dma64_ops;
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
                } else
                        B43_WARN_ON(1);
        }
        spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
        ring->last_injected_overflow = jiffies;
#endif

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_txhdr_cache;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

      out:
        return ring;

      err_free_ringmemory:
        free_ringmemory(ring);
      err_kfree_txhdr_cache:
        kfree(ring->txhdr_cache);
      err_kfree_meta:
        kfree(ring->meta);
      err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
        if (!ring)
                return;

        b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
               (ring->dma64) ? "64" : "32",
               ring->mmio_base,
               (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to worry about concurrency with the
         * RX handler.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->txhdr_cache);
        kfree(ring->meta);
        kfree(ring);
}

void b43_dma_free(struct b43_wldev *dev)
{
        struct b43_dma *dma = &dev->dma;

        b43_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        b43_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        b43_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        b43_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        b43_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        b43_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        b43_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        b43_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int b43_dma_init(struct b43_wldev *dev)
{
        struct b43_dma *dma = &dev->dma;
        struct b43_dmaring *ring;
        int err;
        u64 dmamask;
        int dma64 = 0;

        dmamask = supported_dma_mask(dev);
        if (dmamask == DMA_64BIT_MASK)
                dma64 = 1;

        err = ssb_dma_set_mask(dev->dev, dmamask);
        if (err) {
                b43err(dev->wl, "The machine/kernel does not support "
                       "the required DMA mask (0x%08X%08X)\n",
                       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
                       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
                return -EOPNOTSUPP;
        }

        err = -ENOMEM;
        /* setup TX DMA channels. */
        ring = b43_setup_dmaring(dev, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = b43_setup_dmaring(dev, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = b43_setup_dmaring(dev, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = b43_setup_dmaring(dev, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = b43_setup_dmaring(dev, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = b43_setup_dmaring(dev, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = b43_setup_dmaring(dev, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (dev->dev->id.revision < 5) {
                ring = b43_setup_dmaring(dev, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        b43dbg(dev->wl, "%d-bit DMA initialized\n",
               (dmamask == DMA_64BIT_MASK) ? 64 :
               (dmamask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
      out:
        return err;

      err_destroy_rx0:
        b43_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
      err_destroy_tx5:
        b43_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
      err_destroy_tx4:
        b43_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
      err_destroy_tx3:
        b43_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
      err_destroy_tx2:
        b43_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
      err_destroy_tx1:
        b43_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
      err_destroy_tx0:
        b43_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
        goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in the RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        B43_WARN_ON(slot & ~0x0FFF);
        cookie |= (u16) slot;

        return cookie;
}
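
/*
 * Worked example: a frame whose descriptor chain starts at slot 5 of TX
 * ring 2 gets cookie 0xC000 | 0x0005 == 0xC005. When the hardware later
 * reports TX status carrying that cookie, parse_cookie() below recovers
 * (tx_ring2, slot 5), which is where b43_dma_handle_txstatus() starts
 * walking the ring.
 */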

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
        struct b43_dma *dma = &dev->dma;
        struct b43_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                B43_WARN_ON(1);
        }
        *slot = (cookie & 0x0FFF);
        B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

        return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb,
                           struct ieee80211_tx_control *ctl)
{
        const struct b43_dma_ops *ops = ring->ops;
        u8 *header;
        int slot;
        int err;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_dmadesc_meta *meta_hdr;
        struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET 2
        B43_WARN_ON(skb_shinfo(skb)->nr_frags);

        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
        memset(meta_hdr, 0, sizeof(*meta_hdr));

        header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]);
        b43_generate_txhdr(ring->dev, header,
                           skb->data, skb->len, ctl,
                           generate_cookie(ring, slot));

        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           sizeof(struct b43_txhdr_fw4), 1);
        if (dma_mapping_error(meta_hdr->dmaaddr))
                return -EIO;
        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             sizeof(struct b43_txhdr_fw4), 1, 0, 0);

        /* Get a slot for the payload. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta);
        memset(meta, 0, sizeof(*meta));

        memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
        meta->skb = skb;
        meta->is_last_fragment = 1;

        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (dma_mapping_error(meta->dmaaddr)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }

                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
                if (dma_mapping_error(meta->dmaaddr)) {
                        err = -EIO;
                        goto out_free_bounce;
                }
        }

        ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

        /* Now transfer the whole frame. */
        wmb();
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;

      out_free_bounce:
        dev_kfree_skb_any(skb);
      out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                         sizeof(struct b43_txhdr_fw4), 1);
        return err;
}
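
/*
 * Descriptor layout per frame: each packet consumes SLOTS_PER_PACKET (2)
 * slots, a FRAMESTART descriptor pointing at the device TX header in
 * txhdr_cache followed by a FRAMEEND+IRQ descriptor pointing at the skb
 * payload. The final poke_tx() writes the index of the slot *after* the
 * payload, telling the engine how far it may process descriptors.
 */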

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
        if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
                /* Check if we should inject another ringbuffer overflow
                 * to test handling of this situation in the stack. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43dbg(ring->dev->wl,
                               "Injecting TX ring overflow on "
                               "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43_DEBUG */
        return 0;
}

int b43_dma_tx(struct b43_wldev *dev,
               struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
        struct b43_dmaring *ring;
        int err = 0;
        unsigned long flags;

        ring = priority_to_txring(dev, ctl->queue);
        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
                b43warn(dev->wl, "DMA queue overflow\n");
                err = -ENOSPC;
                goto out_unlock;
        }
        /* Check if the queue was stopped in mac80211,
         * but we got called nevertheless.
         * That would be a mac80211 bug. */
        B43_WARN_ON(ring->stopped);

        err = dma_tx_fragment(ring, skb, ctl);
        if (unlikely(err)) {
                b43err(dev->wl, "DMA tx mapping failure\n");
                goto out_unlock;
        }
        ring->nr_tx_packets++;
        if ((free_slots(ring) < SLOTS_PER_PACKET) ||
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
                ring->stopped = 1;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
        }
      out_unlock:
        spin_unlock_irqrestore(&ring->lock, flags);

        return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int slot;

        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
                return;
        B43_WARN_ON(!irqs_disabled());
        spin_lock(&ring->lock);

        B43_WARN_ON(!ring->tx);
        ops = ring->ops;
        while (1) {
                B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
                desc = ops->idx2desc(ring, slot, &meta);

                if (meta->skb)
                        unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
                                         1);
                else
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         sizeof(struct b43_txhdr_fw4), 1);

                if (meta->is_last_fragment) {
                        B43_WARN_ON(!meta->skb);
                        /* Call back to inform the ieee80211 subsystem about
                         * the status of the transmission.
                         * Some fields of txstat are already filled in dma_tx().
                         */
                        if (status->acked) {
                                meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
                        } else {
                                if (!(meta->txstat.control.flags
                                      & IEEE80211_TXCTL_NO_ACK))
                                        meta->txstat.excessive_retries = 1;
                        }
                        if (status->frame_count == 0) {
                                /* The frame was not transmitted at all. */
                                meta->txstat.retry_count = 0;
                        } else
                                meta->txstat.retry_count = status->frame_count - 1;
                        ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
                                                    &(meta->txstat));
                        /* skb is freed by ieee80211_tx_status_irqsafe() */
                        meta->skb = NULL;
                } else {
                        /* No need to call free_descriptor_buffer here, as
                         * this is only the txhdr, which is not allocated.
                         */
                        B43_WARN_ON(meta->skb);
                }

                /* Everything is unmapped and freed, so the slot is not
                 * used anymore. */
                ring->used_slots--;

                if (meta->is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        dev->stats.last_tx = jiffies;
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
                ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
                ring->stopped = 0;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                }
        }

        spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
                          struct ieee80211_tx_queue_stats *stats)
{
        const int nr_queues = dev->wl->hw->queues;
        struct b43_dmaring *ring;
        struct ieee80211_tx_queue_stats_data *data;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_queues; i++) {
                data = &(stats->data[i]);
                ring = priority_to_txring(dev, i);

                spin_lock_irqsave(&ring->lock, flags);
                data->len = ring->used_slots / SLOTS_PER_PACKET;
                data->limit = ring->nr_slots / SLOTS_PER_PACKET;
                data->count = ring->nr_tx_packets;
                spin_unlock_irqrestore(&ring->lock, flags);
        }
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_rxhdr_fw4 *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ops->idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
                int i = 0;

                while (hw->cookie == 0) {
                        if (i > 100)
                                break;
                        i++;
                        udelay(2);
                        barrier();
                }
                b43_handle_hwtxstatus(ring->dev, hw);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr,
                                           ring->rx_buffersize);

                return;
        }
        rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
        len = le16_to_cpu(rxhdr->frame_len);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_len);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ops->idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                b43err(ring->dev->wl, "DMA RX buffer too small "
                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
                       len, ring->rx_buffersize, cnt);
                goto drop;
        }

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        b43_rx(ring->dev, skb, rxhdr);
      drop:
        return;
}

void b43_dma_rx(struct b43_dmaring *ring)
{
        const struct b43_dma_ops *ops = ring->ops;
        int slot, current_slot;
        int used_slots = 0;

        B43_WARN_ON(ring->tx);
        current_slot = ops->get_current_rxslot(ring);
        B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
                update_max_used_slots(ring, ++used_slots);
        }
        ops->set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}
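
/*
 * RX flow: the device advances its own descriptor pointer as it fills
 * buffers. This handler walks from the driver's last-seen slot up to
 * (but not including) the hardware's current slot, processes each
 * filled buffer via dma_rx(), and finally writes the new slot index
 * back so the engine knows those descriptors may be reused.
 */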

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_suspend(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_resume(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
        b43_dma_tx_resume_ring(dev->dma.tx_ring5);
        b43_dma_tx_resume_ring(dev->dma.tx_ring4);
        b43_dma_tx_resume_ring(dev->dma.tx_ring3);
        b43_dma_tx_resume_ring(dev->dma.tx_ring2);
        b43_dma_tx_resume_ring(dev->dma.tx_ring1);
        b43_dma_tx_resume_ring(dev->dma.tx_ring0);
        b43_power_saving_ctl_bits(dev, 0);
}