/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

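/*
 * Program one 32-bit hardware descriptor: the low bits of the DMA address
 * go into the address word (together with the SSB translation bits), while
 * the high "addrext" bits and the frame start/end/IRQ/table-end flags go
 * into the control word.
 */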
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

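/*
 * Program one 64-bit hardware descriptor.  The 64-bit layout splits the
 * DMA address into separate low/high words and uses two control words:
 * ctl0 carries the frame start/end/IRQ/table-end flags, ctl1 the byte
 * count and the "addrext" bits taken from the upper address word.
 */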
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

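/*
 * Ring slot bookkeeping.  TX descriptors are consumed at current_slot and
 * reclaimed by the TX status handler; used_slots tracks how many are in
 * flight so that b43_dma_tx() can stop the mac80211 queue before the
 * ring overflows.
 */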
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

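/*
 * Each DMA type exposes up to six controllers; map a controller index to
 * the MMIO base offset of its register block.
 */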
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

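/*
 * Thin wrappers around the streaming DMA API: map/unmap a descriptor
 * buffer for device access, and sync RX buffers between device and CPU
 * ownership.
 */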
static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			return 1;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			return 1;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;
}

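/*
 * Attach a freshly allocated RX skb to a descriptor slot: map it for DMA
 * (falling back to GFP_DMA memory if the first mapping lands outside the
 * addressable window), program the descriptor and clear the length/cookie
 * fields so the RX path can detect that the device has not written to
 * the buffer yet.
 */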
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

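/*
 * Probe which DMA address width the core supports: the TMSHIGH DMA64
 * flag indicates a 64-bit capable core; otherwise, writing the addrext
 * mask to a 32-bit control register and reading it back tells us whether
 * the address extension bits are implemented (32-bit) or not (30-bit).
 */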
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev)))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%u %s max used slots: %d/%d\n",
	       (unsigned int)(ring->type), ringname,
	       ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

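/*
 * Set up all DMA rings: one TX ring per 802.11e access category
 * (BK/BE/VI/VO) plus a multicast TX ring, and a single RX ring, using
 * the widest DMA address width both the core and the host support.
 */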
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	switch (dmamask) {
	default:
		B43_WARN_ON(1);
	case DMA_30BIT_MASK:
		type = B43_DMA_30BIT;
		break;
	case DMA_32BIT_MASK:
		type = B43_DMA_32BIT;
		break;
	case DMA_64BIT_MASK:
		type = B43_DMA_64BIT;
		break;
	}
	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required DMA mask (0x%08X%08X)\n",
		       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
		       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
		return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

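/*
 * Example cookie layout: slot 5 on the TX ring with index 2 yields
 * ((2 + 1) << 12) | 5 = 0x3005; parse_cookie() below reverses this by
 * switching on (cookie & 0xF000).
 */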
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

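/*
 * Queue one frame on a TX ring.  Each packet consumes two slots
 * (SLOTS_PER_PACKET): the first carries the device TX header from the
 * per-ring txhdr_cache, the second the frame payload.  If the payload
 * mapping is not DMA-addressable, the data is copied into a GFP_DMA
 * bounce buffer and mapped again.  On any failure the slot counters
 * are rolled back to their previous values.
 */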
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET 2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

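/*
 * mac80211 TX entry point: multicast/after-DTIM frames go to the
 * multicast ring (with the more-data bit set for the firmware), all
 * other frames are dispatched by QoS priority.  If fewer than
 * SLOTS_PER_PACKET free slots remain after queueing, the mac80211
 * queue is stopped until TX status processing frees slots again.
 */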
int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = ctl->queue;

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

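/*
 * TX status handler: walk the slots belonging to the reported cookie,
 * unmap the header and payload buffers, report the transmission result
 * to mac80211 and wake the queue again if the ring had been stopped.
 */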
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

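/*
 * Process one received frame.  The firmware writes the frame length into
 * the rxhdr at the start of the buffer; a length of 0 means the write
 * has not completed yet, so it is polled a few times before the buffer
 * is recycled.  Oversized frames spanning multiple buffers are dropped,
 * since the buffers are allocated large enough for any valid frame.
 */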
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

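/*
 * Suspend/resume TX on all rings.  The device is kept awake across the
 * suspended period via the power saving control bits.
 */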
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}