/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

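/* The DMA engines on these cores come in a 32-bit and a 64-bit descriptor
 * flavor. Everything below is written against the b43_dma_ops vtable
 * (idx2desc, fill_descriptor, poke_tx, ...), so the ring code itself is
 * format-agnostic; b43_setup_dmaring() picks dma32_ops or dma64_ops based
 * on the detected engine type. */
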
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

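/* Debug-only statistic: track the ring occupancy high-water mark. This is
 * the "Used slots" figure printed by b43_destroy_dmaring() at teardown. */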
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

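/* Thin wrappers around the ssb DMA API. The tx flag selects the DMA
 * direction: DMA_TO_DEVICE for TX buffers, DMA_FROM_DEVICE for RX. */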
static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

/* Check if a DMA region fits the device constraints.
 * Returns true if the region is usable with this device. */
static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
				      dma_addr_t addr, size_t size)
{
	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + size > (1ULL << 30))
			return 0;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + size > (1ULL << 32))
			return 0;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}
	return 1;
}

#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)

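/* Undo __b43_get_and_map_ringmem(): tear down the streaming DMA mapping
 * and return the pages to the allocator. */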
static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
				       dma_addr_t dmaaddr, size_t size)
{
	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
	free_pages((unsigned long)base, get_order(size));
}

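/* Allocate zeroed pages for ring memory and map them for DMA. Returns the
 * kernel virtual address and fills *dmaaddr, or returns NULL on failure.
 * Note that the ring is mapped streaming (DMA_TO_DEVICE), so the
 * descriptor writers in this file sync the region for the device after
 * updating it. */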
static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
					dma_addr_t *dmaaddr, size_t size,
					gfp_t gfp_flags)
{
	void *base;

	base = (void *)__get_free_pages(gfp_flags, get_order(size));
	if (!base)
		return NULL;
	memset(base, 0, size);
	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
				      DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
		free_pages((unsigned long)base, get_order(size));
		return NULL;
	}

	return base;
}

static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
				      dma_addr_t *dmaaddr, size_t size)
{
	void *base;

	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
					 GFP_KERNEL);
	if (!base) {
		b43err(ring->dev->wl, "Failed to allocate or map pages "
		       "for DMA ringmemory\n");
		return NULL;
	}
	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
		/* The memory does not fit our device constraints.
		 * Retry with GFP_DMA set to get lower memory. */
		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
						 GFP_KERNEL | GFP_DMA);
		if (!base) {
			b43err(ring->dev->wl, "Failed to allocate or map pages "
			       "in the GFP_DMA region for DMA ringmemory\n");
			return NULL;
		}
		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
			b43err(ring->dev->wl, "Failed to allocate DMA "
			       "ringmemory that fits device constraints\n");
			return NULL;
		}
	}
	/* We expect the memory to be 4k aligned, at least. */
	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
		return NULL;
	}

	return base;
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	unsigned int required;
	void *base;
	dma_addr_t dmaaddr;

	/* There are several requirements for the descriptor ring memory:
	 * - The memory region needs to fit the address constraints for the
	 *   device (same as for frame buffers).
	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
	 */

	if (ring->type == B43_DMA_64BIT)
		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
	else
		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
	if (B43_WARN_ON(required > 0x1000))
		return -ENOMEM;

	ring->alloc_descsize = 0x1000;
	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
	if (!base)
		return -ENOMEM;
	ring->alloc_descbase = base;
	ring->alloc_dmabase = dmaaddr;

	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
		/* We're on <=32bit DMA, or we already got 8k aligned memory.
		 * That's all we need, so we're fine. */
		ring->descbase = base;
		ring->dmabase = dmaaddr;
		return 0;
	}
	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);

	/* Ok, we failed at the 8k alignment requirement.
	 * Try to force-align the memory region now. */
	ring->alloc_descsize = 0x2000;
	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
	if (!base)
		return -ENOMEM;
	ring->alloc_descbase = base;
	ring->alloc_dmabase = dmaaddr;

	if (is_8k_aligned(dmaaddr)) {
		/* We're already 8k aligned. That's OK, too. */
		ring->descbase = base;
		ring->dmabase = dmaaddr;
		return 0;
	}
	/* Force-align it to 8k. The buffer is 4k aligned (checked above)
	 * and 0x2000 bytes long, so base + 0x1000 must be 8k aligned. */
	ring->descbase = (void *)((u8 *)base + 0x1000);
	ring->dmabase = dmaaddr + 0x1000;
	B43_WARN_ON(!is_8k_aligned(ring->dmabase));

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
				   ring->alloc_dmabase, ring->alloc_descsize);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	if (!b43_dma_address_ok(ring, addr, buffersize)) {
		/* We can't support this address. Unmap it again. */
		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
		return 1;
	}

	/* The address is OK. */
	return 0;
}

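/* RX buffers are "poisoned" before being handed to the device: the length
 * field in the RX header is zeroed and the first eight frame bytes (PLCP
 * header plus padding) are set to 0xFF. The RX path can then detect
 * descriptors the device reported as filled but never actually wrote. */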
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       ring->alloc_dmabase,
				       ring->alloc_descsize, DMA_TO_DEVICE);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

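/* Probe the DMA address width the core supports: a set SSB_TMSHIGH_DMA64
 * flag means the 64-bit engine is present. Otherwise, if the 32-bit
 * engine's address-extension bits stick after a write, 32-bit DMA works;
 * failing that, we are limited to 30 bits. */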
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

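/* do_div() modifies its first argument in place (it becomes the quotient)
 * and returns the remainder; these wrappers give plain value semantics
 * for the 64-bit divisions in the statistics below. */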
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

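/* Negotiate the DMA mask with the system: start at the width the hardware
 * supports and step down 64 -> 32 -> 30 bit until ssb_dma_set_mask()
 * accepts one. */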
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 * It also must not be 0xFFFF, because that value
	 * is reserved for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

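/* Illustrative example: for ring->index == 2 (the AC_VI TX ring) and
 * slot == 0x01A, generate_cookie() yields ((2 + 1) << 12) | 0x01A ==
 * 0x301A. parse_cookie() below inverts this by switching on the upper
 * nibble and masking out the slot number. */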
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

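/* Queue one frame on a TX ring. This consumes TX_SLOTS_PER_FRAME (2)
 * slots: one for the device TX header (taken from the ring's
 * txhdr_cache) and one for the 802.11 frame payload. If the payload
 * cannot be mapped within the device's address constraints, it is copied
 * into a GFP_DMA bounce buffer, which is freed again in the TX status
 * handler. */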
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}
		memcpy(priv_info->bouncebuffer, skb->data, skb->len);

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       ring->alloc_dmabase,
				       ring->alloc_descsize, DMA_TO_DEVICE);
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

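/* Handle a TX status report from the device. Reports arrive strictly
 * in-order per ring, so the slot decoded from the status cookie must
 * be the oldest used slot. The firstused computation below wraps
 * around the ring: e.g. with current_slot = 1 and used_slots = 3,
 * firstused becomes 1 - 3 + 1 = -1, which wraps to nr_slots - 1. */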
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

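/* Fill in the mac80211 TX queue statistics. Slot counts are converted
 * to frame counts, since each frame occupies TX_SLOTS_PER_FRAME slots:
 * e.g. used_slots = 6 with TX_SLOTS_PER_FRAME = 2 means 3 queued frames. */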
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
		stats[i].count = ring->nr_tx_packets;
	}
}

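/* Process one received frame from the RX ring slot *slot. The filled
 * descriptor buffer is handed up to mac80211 as an skb and immediately
 * replaced by a freshly allocated buffer; on any error the old buffer
 * is re-poisoned and recycled instead of being delivered. */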
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

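/* Drain all frames the device has placed into the RX ring since the
 * last call. The hardware's current RX slot pointer marks the end of
 * the valid frames; the software slot pointer chases it. */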
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

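/* Suspend/resume a single TX ring, dispatched through the
 * 32-bit/64-bit DMA ops of that ring. */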
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

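/* Suspend TX on all rings. The device is forced awake (B43_PS_AWAKE)
 * before the rings are touched, and the power-saving override is
 * dropped again after resume. Note that resume walks the rings in the
 * reverse order of suspend. */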
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

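/* Direct FIFO RX mode, used by the PIO code path. The helper below
 * flips the direct-FIFO bit in a DMA engine's RX control register via
 * read-modify-write, honoring the 32-bit vs. 64-bit register layout. */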
#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */