/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc32 *descbase = ring->descbase;
        int slot;
        u32 ctl;
        u32 addr;
        u32 addrext;

        slot = (int)(&(desc->dma32) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

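        /*
         * SSB address translation (a sketch of what the masking below does):
         * the DMA engine does not emit the CPU bus address unchanged. The
         * bits selected by SSB_DMA_TRANSLATION_MASK (typically the top two,
         * 0xC0000000) are replaced by a core-specific translation value,
         * and the displaced original bits travel separately in the
         * descriptor's "addrext" field. E.g., assuming the usual mask,
         * dmaaddr 0xD0001000 yields addr = 0x10001000 | translation and
         * addrext = 0x3.
         */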
        addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ssb_dma_translation(ring->dev->dev);
        ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
        if (slot == ring->nr_slots - 1)
                ctl |= B43_DMA32_DCTL_DTABLEEND;
        if (start)
                ctl |= B43_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
            & B43_DMA32_DCTL_ADDREXT_MASK;

        desc->dma32.control = cpu_to_le32(ctl);
        desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
        val &= B43_DMA32_RXDPTR;

        return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
        .idx2desc = op32_idx2desc,
        .fill_descriptor = op32_fill_descriptor,
        .poke_tx = op32_poke_tx,
        .tx_suspend = op32_tx_suspend,
        .tx_resume = op32_tx_resume,
        .get_current_rxslot = op32_get_current_rxslot,
        .set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc64 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc64 *descbase = ring->descbase;
        int slot;
        u32 ctl0 = 0, ctl1 = 0;
        u32 addrlo, addrhi;
        u32 addrext;

        slot = (int)(&(desc->dma64) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

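        /*
         * Same translation scheme as the 32-bit case, applied to the high
         * dword: the masked-out bits travel in "addrext" and the SSB
         * translation value is ORed into addrhi. The << 1 presumably
         * re-positions the translation bits for the 64-bit register layout;
         * see the matching shift in dmacontroller_setup().
         */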
        addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
        addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
        if (slot == ring->nr_slots - 1)
                ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
        if (start)
                ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
        if (end)
                ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
        if (irq)
                ctl0 |= B43_DMA64_DCTL0_IRQ;
        ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
        ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
            & B43_DMA64_DCTL1_ADDREXT_MASK;

        desc->dma64.control0 = cpu_to_le32(ctl0);
        desc->dma64.control1 = cpu_to_le32(ctl1);
        desc->dma64.address_low = cpu_to_le32(addrlo);
        desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
        val &= B43_DMA64_RXSTATDPTR;

        return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
        .idx2desc = op64_idx2desc,
        .fill_descriptor = op64_fill_descriptor,
        .poke_tx = op64_poke_tx,
        .tx_suspend = op64_tx_suspend,
        .tx_resume = op64_tx_resume,
        .get_current_rxslot = op64_get_current_rxslot,
        .set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
        if (current_used_slots <= ring->max_used_slots)
                return;
        ring->max_used_slots = current_used_slots;
        if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
                b43dbg(ring->dev->wl,
                       "max_used_slots increased to %d on %s ring %d\n",
                       ring->max_used_slots,
                       ring->tx ? "TX" : "RX", ring->index);
        }
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
        int slot;

        B43_WARN_ON(!ring->tx);
        B43_WARN_ON(ring->stopped);
        B43_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);

        return slot;
}

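/* Map a DMA controller index (0-5) to the MMIO base address of that engine.
 * The 32-bit and 64-bit engines use different register blocks. */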
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
        static const u16 map64[] = {
                B43_MMIO_DMA64_BASE0,
                B43_MMIO_DMA64_BASE1,
                B43_MMIO_DMA64_BASE2,
                B43_MMIO_DMA64_BASE3,
                B43_MMIO_DMA64_BASE4,
                B43_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                B43_MMIO_DMA32_BASE0,
                B43_MMIO_DMA32_BASE1,
                B43_MMIO_DMA32_BASE2,
                B43_MMIO_DMA32_BASE3,
                B43_MMIO_DMA32_BASE4,
                B43_MMIO_DMA32_BASE5,
        };

        if (type == B43_DMA_64BIT) {
                B43_WARN_ON(!(controller_idx >= 0 &&
                              controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
        }
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map32)));
        return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                          unsigned char *buf, size_t len, int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len, DMA_TO_DEVICE);
        } else {
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len, DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
                      dma_addr_t addr, size_t len, int tx)
{
        if (tx) {
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len, DMA_TO_DEVICE);
        } else {
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len, DMA_FROM_DEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                             dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        ssb_dma_sync_single_for_cpu(ring->dev->dev,
                                    addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        ssb_dma_sync_single_for_device(ring->dev->dev,
                                       addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
                            struct b43_dmadesc_meta *meta)
{
        if (meta->skb) {
                dev_kfree_skb_any(meta->skb);
                meta->skb = NULL;
        }
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
        gfp_t flags = GFP_KERNEL;

        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
         * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
         * has shown that 4K is sufficient for the latter as long as the buffer
         * does not cross an 8K boundary.
         *
         * For unknown reasons - possibly a hardware error - the BCM4311 rev
         * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
         * which accounts for the GFP_DMA flag below.
         *
         * The flags here must match the flags in free_ringmemory below!
         */
        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;
        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
                                                  B43_DMA_RINGMEMSIZE,
                                                  &(ring->dmabase), flags);
        if (!ring->descbase) {
                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
        gfp_t flags = GFP_KERNEL;

        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;

        ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
                                ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
                                                   B43_DMA32_RXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_RXSTAT;
                        if (value == B43_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_RXSTATE;
                        if (value == B43_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA RX reset timed out\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED ||
                            value == B43_DMA64_TXSTAT_IDLEWAIT ||
                            value == B43_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED ||
                            value == B43_DMA32_TXSTAT_IDLEWAIT ||
                            value == B43_DMA32_TXSTAT_STOPPED)
                                break;
                }
                msleep(1);
        }
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA TX reset timed out\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        msleep(1);

        return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                  dma_addr_t addr,
                                  size_t buffersize, bool dma_to_device)
{
        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                return 1;

        switch (ring->type) {
        case B43_DMA_30BIT:
                if ((u64)addr + buffersize > (1ULL << 30))
                        goto address_error;
                break;
        case B43_DMA_32BIT:
                if ((u64)addr + buffersize > (1ULL << 32))
                        goto address_error;
                break;
        case B43_DMA_64BIT:
                /* Currently we can't have addresses beyond
                 * 64bit in the kernel. */
                break;
        }

        /* The address is OK. */
        return 0;

address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);

        return 1;
}
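
/* Note: a mapping can succeed from the DMA-API point of view and still be
 * unreachable for the engine, e.g. a 30-bit engine only reaches the first
 * 1 GiB of bus addresses. Callers therefore retry with GFP_DMA bounce
 * buffers when b43_dma_mapping_error() rejects an address. */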

static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
        struct b43_rxhdr_fw4 *rxhdr;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        B43_WARN_ON(ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;

                dev_kfree_skb_any(skb);

                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
                if (unlikely(!skb))
                        return -ENOMEM;
                dmaaddr = map_descbuffer(ring, skb->data,
                                         ring->rx_buffersize, 0);
        }

        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
        rxhdr->frame_len = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err) {
                        b43err(ring->dev->wl,
                               "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
                }
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;
        u32 trans = ssb_dma_translation(ring->dev->dev);

        if (ring->tx) {
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA64_TXENABLE;
                        value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                            & B43_DMA64_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_TXCTL, value);
                        b43_dma_write(ring, B43_DMA64_TXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_TXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA32_TXENABLE;
                        value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                            & B43_DMA32_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_TXCTL, value);
                        b43_dma_write(ring, B43_DMA32_TXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
                        value |= B43_DMA64_RXENABLE;
                        value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                            & B43_DMA64_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_RXCTL, value);
                        b43_dma_write(ring, B43_DMA64_RXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_RXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                        b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc64));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
                        value |= B43_DMA32_RXENABLE;
                        value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                            & B43_DMA32_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_RXCTL, value);
                        b43_dma_write(ring, B43_DMA32_RXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                        b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc32));
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
        if (ring->tx) {
                b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_TXRING, 0);
        } else {
                b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        B43_WARN_ON(!ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta);
        }
}

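/* Probe how wide an address the DMA engine can take: if the core flags
 * 64-bit support in SSB_TMSHIGH, use that; otherwise write the 32-bit
 * address extension mask to TXCTL and read it back - if the bits stick,
 * the engine understands address extensions (32-bit), else it is a
 * 30-bit engine. */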
763static u64 supported_dma_mask(struct b43_wldev *dev)
764{
765 u32 tmp;
766 u16 mmio_base;
767
768 tmp = b43_read32(dev, SSB_TMSHIGH);
769 if (tmp & SSB_TMSHIGH_DMA64)
770 return DMA_64BIT_MASK;
771 mmio_base = b43_dmacontroller_base(0, 0);
772 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
773 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
774 if (tmp & B43_DMA32_TXADDREXT_MASK)
775 return DMA_32BIT_MASK;
776
777 return DMA_30BIT_MASK;
778}
779
Michael Buesch5100d5a2008-03-29 21:01:16 +0100780static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
781{
782 if (dmamask == DMA_30BIT_MASK)
783 return B43_DMA_30BIT;
784 if (dmamask == DMA_32BIT_MASK)
785 return B43_DMA_32BIT;
786 if (dmamask == DMA_64BIT_MASK)
787 return B43_DMA_64BIT;
788 B43_WARN_ON(1);
789 return B43_DMA_30BIT;
790}
791
Michael Buesche4d6b792007-09-18 15:39:42 -0400792/* Main initialization function. */
793static
794struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
795 int controller_index,
Michael Bueschb79caa62008-02-05 12:50:41 +0100796 int for_tx,
797 enum b43_dmatype type)
Michael Buesche4d6b792007-09-18 15:39:42 -0400798{
799 struct b43_dmaring *ring;
800 int err;
Michael Buesche4d6b792007-09-18 15:39:42 -0400801 dma_addr_t dma_test;
802
803 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
804 if (!ring)
805 goto out;
806
Michael Buesch028118a2008-06-12 11:58:56 +0200807 ring->nr_slots = B43_RXRING_SLOTS;
Michael Buesche4d6b792007-09-18 15:39:42 -0400808 if (for_tx)
Michael Buesch028118a2008-06-12 11:58:56 +0200809 ring->nr_slots = B43_TXRING_SLOTS;
Michael Buesche4d6b792007-09-18 15:39:42 -0400810
Michael Buesch028118a2008-06-12 11:58:56 +0200811 ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
Michael Buesche4d6b792007-09-18 15:39:42 -0400812 GFP_KERNEL);
813 if (!ring->meta)
814 goto err_kfree_ring;
Michael Buesche4d6b792007-09-18 15:39:42 -0400815
Michael Buesch028118a2008-06-12 11:58:56 +0200816 ring->type = type;
Michael Buesche4d6b792007-09-18 15:39:42 -0400817 ring->dev = dev;
Michael Bueschb79caa62008-02-05 12:50:41 +0100818 ring->mmio_base = b43_dmacontroller_base(type, controller_index);
Michael Buesche4d6b792007-09-18 15:39:42 -0400819 ring->index = controller_index;
Michael Bueschb79caa62008-02-05 12:50:41 +0100820 if (type == B43_DMA_64BIT)
Michael Buesche4d6b792007-09-18 15:39:42 -0400821 ring->ops = &dma64_ops;
822 else
823 ring->ops = &dma32_ops;
824 if (for_tx) {
825 ring->tx = 1;
826 ring->current_slot = -1;
827 } else {
828 if (ring->index == 0) {
829 ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
830 ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
Michael Buesche4d6b792007-09-18 15:39:42 -0400831 } else
832 B43_WARN_ON(1);
833 }
834 spin_lock_init(&ring->lock);
835#ifdef CONFIG_B43_DEBUG
836 ring->last_injected_overflow = jiffies;
837#endif
838
Michael Buesch028118a2008-06-12 11:58:56 +0200839 if (for_tx) {
840 ring->txhdr_cache = kcalloc(ring->nr_slots,
841 b43_txhdr_size(dev),
842 GFP_KERNEL);
843 if (!ring->txhdr_cache)
844 goto err_kfree_meta;
845
846 /* test for ability to dma to txhdr_cache */
Michael Bueschf2257632008-06-20 11:50:29 +0200847 dma_test = ssb_dma_map_single(dev->dev,
848 ring->txhdr_cache,
849 b43_txhdr_size(dev),
850 DMA_TO_DEVICE);
Michael Buesch028118a2008-06-12 11:58:56 +0200851
852 if (b43_dma_mapping_error(ring, dma_test,
853 b43_txhdr_size(dev), 1)) {
854 /* ugh realloc */
855 kfree(ring->txhdr_cache);
856 ring->txhdr_cache = kcalloc(ring->nr_slots,
857 b43_txhdr_size(dev),
858 GFP_KERNEL | GFP_DMA);
859 if (!ring->txhdr_cache)
860 goto err_kfree_meta;
861
Michael Bueschf2257632008-06-20 11:50:29 +0200862 dma_test = ssb_dma_map_single(dev->dev,
863 ring->txhdr_cache,
864 b43_txhdr_size(dev),
865 DMA_TO_DEVICE);
Michael Buesch028118a2008-06-12 11:58:56 +0200866
867 if (b43_dma_mapping_error(ring, dma_test,
868 b43_txhdr_size(dev), 1)) {
869
870 b43err(dev->wl,
871 "TXHDR DMA allocation failed\n");
872 goto err_kfree_txhdr_cache;
873 }
874 }
875
Michael Bueschf2257632008-06-20 11:50:29 +0200876 ssb_dma_unmap_single(dev->dev,
877 dma_test, b43_txhdr_size(dev),
878 DMA_TO_DEVICE);
Michael Buesch028118a2008-06-12 11:58:56 +0200879 }
880
Michael Buesche4d6b792007-09-18 15:39:42 -0400881 err = alloc_ringmemory(ring);
882 if (err)
883 goto err_kfree_txhdr_cache;
884 err = dmacontroller_setup(ring);
885 if (err)
886 goto err_free_ringmemory;
887
888 out:
889 return ring;
890
891 err_free_ringmemory:
892 free_ringmemory(ring);
893 err_kfree_txhdr_cache:
894 kfree(ring->txhdr_cache);
895 err_kfree_meta:
896 kfree(ring->meta);
897 err_kfree_ring:
898 kfree(ring);
899 ring = NULL;
900 goto out;
901}
902
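/* 64-bit division helpers. do_div() (asm/div64.h) divides a 64-bit value by
 * a 32-bit divisor in place, leaving the quotient in its first argument and
 * returning the remainder; a plain 64-bit '/' would pull in libgcc on 32-bit
 * machines. So divide() yields the quotient and modulo() the remainder:
 * e.g. divide(1005, 10) == 100 and modulo(1005, 10) == 5. */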
#define divide(a, b) ({ \
        typeof(a) __a = a; \
        do_div(__a, b); \
        __a; \
    })

#define modulo(a, b) ({ \
        typeof(a) __a = a; \
        do_div(__a, b); \
    })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
                                const char *ringname)
{
        if (!ring)
                return;

#ifdef CONFIG_B43_DEBUG
        {
                /* Print some statistics. */
                u64 failed_packets = ring->nr_failed_tx_packets;
                u64 succeed_packets = ring->nr_succeed_tx_packets;
                u64 nr_packets = failed_packets + succeed_packets;
                u64 permille_failed = 0, average_tries = 0;

                if (nr_packets)
                        permille_failed = divide(failed_packets * 1000, nr_packets);
                if (nr_packets)
                        average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

                b43dbg(ring->dev->wl, "DMA-%u %s: "
                       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
                       "Average tries %llu.%02llu\n",
                       (unsigned int)(ring->type), ringname,
                       ring->max_used_slots,
                       ring->nr_slots,
                       (unsigned long long)failed_packets,
                       (unsigned long long)nr_packets,
                       (unsigned long long)divide(permille_failed, 10),
                       (unsigned long long)modulo(permille_failed, 10),
                       (unsigned long long)divide(average_tries, 100),
                       (unsigned long long)modulo(average_tries, 100));
        }
#endif /* DEBUG */

        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->txhdr_cache);
        kfree(ring->meta);
        kfree(ring);
}

#define destroy_ring(dma, ring) do { \
        b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
        (dma)->ring = NULL; \
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
        struct b43_dma *dma;

        if (b43_using_pio_transfers(dev))
                return;
        dma = &dev->dma;

        destroy_ring(dma, rx_ring);
        destroy_ring(dma, tx_ring_AC_BK);
        destroy_ring(dma, tx_ring_AC_BE);
        destroy_ring(dma, tx_ring_AC_VI);
        destroy_ring(dma, tx_ring_AC_VO);
        destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
        u64 orig_mask = mask;
        bool fallback = 0;
        int err;

        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
                err = ssb_dma_set_mask(dev->dev, mask);
                if (!err)
                        break;
                if (mask == DMA_64BIT_MASK) {
                        mask = DMA_32BIT_MASK;
                        fallback = 1;
                        continue;
                }
                if (mask == DMA_32BIT_MASK) {
                        mask = DMA_30BIT_MASK;
                        fallback = 1;
                        continue;
                }
                b43err(dev->wl, "The machine/kernel does not support "
                       "the required %u-bit DMA mask\n",
                       (unsigned int)dma_mask_to_engine_type(orig_mask));
                return -EOPNOTSUPP;
        }
        if (fallback) {
                b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
                        (unsigned int)dma_mask_to_engine_type(orig_mask),
                        (unsigned int)dma_mask_to_engine_type(mask));
        }

        return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
        struct b43_dma *dma = &dev->dma;
        int err;
        u64 dmamask;
        enum b43_dmatype type;

        dmamask = supported_dma_mask(dev);
        type = dma_mask_to_engine_type(dmamask);
        err = b43_dma_set_mask(dev, dmamask);
        if (err)
                return err;

        err = -ENOMEM;
        /* setup TX DMA channels. */
        dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
        if (!dma->tx_ring_AC_BK)
                goto out;

        dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
        if (!dma->tx_ring_AC_BE)
                goto err_destroy_bk;

        dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
        if (!dma->tx_ring_AC_VI)
                goto err_destroy_be;

        dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
        if (!dma->tx_ring_AC_VO)
                goto err_destroy_vi;

        dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
        if (!dma->tx_ring_mcast)
                goto err_destroy_vo;

        /* setup RX DMA channel. */
        dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
        if (!dma->rx_ring)
                goto err_destroy_mcast;

        /* No support for the TX status DMA ring. */
        B43_WARN_ON(dev->dev->id.revision < 5);

        b43dbg(dev->wl, "%u-bit DMA initialized\n",
               (unsigned int)type);
        err = 0;
out:
        return err;

err_destroy_mcast:
        destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
        destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
        destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
        destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
        destroy_ring(dma, tx_ring_AC_BK);
        return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
        u16 cookie;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         * It can also not be 0xFFFF because that is special
         * for multicast frames.
         */
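        /* Example: ring->index 2 (tx_ring_AC_VI) and slot 10 encode to
         * cookie 0x300A; parse_cookie() below reverses the split. */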
        cookie = (((u16)ring->index + 1) << 12);
        B43_WARN_ON(slot & ~0x0FFF);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
        struct b43_dma *dma = &dev->dma;
        struct b43_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0x1000:
                ring = dma->tx_ring_AC_BK;
                break;
        case 0x2000:
                ring = dma->tx_ring_AC_BE;
                break;
        case 0x3000:
                ring = dma->tx_ring_AC_VI;
                break;
        case 0x4000:
                ring = dma->tx_ring_AC_VO;
                break;
        case 0x5000:
                ring = dma->tx_ring_mcast;
                break;
        default:
                B43_WARN_ON(1);
        }
        *slot = (cookie & 0x0FFF);
        B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

        return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u8 *header;
        int slot, old_top_slot, old_used_slots;
        int err;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_dmadesc_meta *meta_hdr;
        struct sk_buff *bounce_skb;
        u16 cookie;
        size_t hdrsize = b43_txhdr_size(ring->dev);

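/* Each frame occupies two descriptor slots: one for the device TX header
 * and one for the payload skb. */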
#define SLOTS_PER_PACKET 2

        old_top_slot = ring->current_slot;
        old_used_slots = ring->used_slots;

        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
        memset(meta_hdr, 0, sizeof(*meta_hdr));

        header = &(ring->txhdr_cache[slot * hdrsize]);
        cookie = generate_cookie(ring, slot);
        err = b43_generate_txhdr(ring->dev, header,
                                 skb->data, skb->len, info, cookie);
        if (unlikely(err)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return err;
        }

        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           hdrsize, 1);
        if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return -EIO;
        }
        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             hdrsize, 1, 0, 0);

        /* Get a slot for the payload. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta);
        memset(meta, 0, sizeof(*meta));

        meta->skb = skb;
        meta->is_last_fragment = 1;

        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }

                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
                if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -EIO;
                        goto out_free_bounce;
                }
        }

        ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Tell the firmware about the cookie of the last
                 * mcast frame, so it can clear the more-data bit in it. */
                b43_shm_write16(ring->dev, B43_SHM_SHARED,
                                B43_SHM_SH_MCASTCOOKIE, cookie);
        }
        /* Now transfer the whole frame. */
        wmb();
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;

out_free_bounce:
        dev_kfree_skb_any(skb);
out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                         hdrsize, 1);
        return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
        if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
                /* Check if we should inject another ringbuffer overflow
                 * to test handling of this situation in the stack. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43dbg(ring->dev->wl,
                               "Injecting TX ring overflow on "
                               "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43_DEBUG */
        return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
                                                   u8 queue_prio)
{
        struct b43_dmaring *ring;

        if (b43_modparam_qos) {
                /* 0 = highest priority */
                switch (queue_prio) {
                default:
                        B43_WARN_ON(1);
                        /* fallthrough */
                case 0:
                        ring = dev->dma.tx_ring_AC_VO;
                        break;
                case 1:
                        ring = dev->dma.tx_ring_AC_VI;
                        break;
                case 2:
                        ring = dev->dma.tx_ring_AC_BE;
                        break;
                case 3:
                        ring = dev->dma.tx_ring_AC_BK;
                        break;
                }
        } else
                ring = dev->dma.tx_ring_AC_BE;

        return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
        struct b43_dmaring *ring;
        struct ieee80211_hdr *hdr;
        int err = 0;
        unsigned long flags;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        hdr = (struct ieee80211_hdr *)skb->data;
        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* The multicast ring will be sent after the DTIM */
                ring = dev->dma.tx_ring_mcast;
                /* Set the more-data bit. Ucode will clear it on
                 * the last frame for us. */
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else {
                /* Decide by priority where to put this frame. */
                ring = select_ring_by_priority(
                        dev, skb_get_queue_mapping(skb));
        }

        spin_lock_irqsave(&ring->lock, flags);

        B43_WARN_ON(!ring->tx);
        /* Check if the queue was stopped in mac80211,
         * but we got called nevertheless.
         * That would be a mac80211 bug. */
        B43_WARN_ON(ring->stopped);

        if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
                b43warn(dev->wl, "DMA queue overflow\n");
                err = -ENOSPC;
                goto out_unlock;
        }

        /* Assign the queue number to the ring (if not already done before)
         * so TX status handling can use it. The queue to ring mapping is
         * static, so we don't need to store it per frame. */
        ring->queue_prio = skb_get_queue_mapping(skb);

        err = dma_tx_fragment(ring, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we don't have the encryption key
                 * anymore and must not transmit it unencrypted. */
                dev_kfree_skb_any(skb);
                err = 0;
                goto out_unlock;
        }
        if (unlikely(err)) {
                b43err(dev->wl, "DMA tx mapping failure\n");
                goto out_unlock;
        }
        ring->nr_tx_packets++;
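        /* If another header+payload pair no longer fits, stop this mac80211
         * queue; b43_dma_handle_txstatus() wakes it again once slots free up. */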
        if ((free_slots(ring) < SLOTS_PER_PACKET) ||
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                ring->stopped = 1;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
        }
out_unlock:
        spin_unlock_irqrestore(&ring->lock, flags);

        return err;
}

/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int slot;
        bool frame_succeed;

        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
                return;

        spin_lock(&ring->lock); /* IRQs are already disabled. */

        B43_WARN_ON(!ring->tx);
        ops = ring->ops;
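        /* Walk this frame's descriptors, starting at the header slot encoded
         * in the cookie: unmap the txhdr slot, then the payload slot, and
         * stop after the one marked is_last_fragment. */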
        while (1) {
                B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
                desc = ops->idx2desc(ring, slot, &meta);

                if (meta->skb)
                        unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
                                         1);
                else
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         b43_txhdr_size(dev), 1);

                if (meta->is_last_fragment) {
                        struct ieee80211_tx_info *info;

                        BUG_ON(!meta->skb);

                        info = IEEE80211_SKB_CB(meta->skb);

                        /*
                         * Call back to inform the ieee80211 subsystem about
                         * the status of the transmission.
                         */
                        frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
                        if (frame_succeed)
                                ring->nr_succeed_tx_packets++;
                        else
                                ring->nr_failed_tx_packets++;
                        ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
                        ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);

                        /* skb is freed by ieee80211_tx_status_irqsafe() */
                        meta->skb = NULL;
                } else {
                        /* No need to call free_descriptor_buffer here, as
                         * this is only the txhdr, which is not allocated.
                         */
                        B43_WARN_ON(meta->skb);
                }

                /* Everything unmapped and free'd. So it's not used anymore. */
                ring->used_slots--;

                if (meta->is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        dev->stats.last_tx = jiffies;
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
                ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
                ring->stopped = 0;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                }
        }

        spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
                          struct ieee80211_tx_queue_stats *stats)
{
        const int nr_queues = dev->wl->hw->queues;
        struct b43_dmaring *ring;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_queues; i++) {
                ring = select_ring_by_priority(dev, i);

                spin_lock_irqsave(&ring->lock, flags);
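                /* Each packet takes SLOTS_PER_PACKET descriptors, so convert
                 * slot counts into packet counts for mac80211. */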
                stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
                stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
                stats[i].count = ring->nr_tx_packets;
                spin_unlock_irqrestore(&ring->lock, flags);
        }
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_rxhdr_fw4 *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ops->idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
        len = le16_to_cpu(rxhdr->frame_len);
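        /* setup_rx_descbuffer() zeroed frame_len, so zero here means the
         * device has not finished writing the RX header yet; give it a few
         * microseconds before treating the descriptor as empty. */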
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_len);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ops->idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                b43err(ring->dev->wl, "DMA RX buffer too small "
                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
                       len, ring->rx_buffersize, cnt);
                goto drop;
        }

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        b43_rx(ring->dev, skb, rxhdr);
drop:
        return;
}

void b43_dma_rx(struct b43_dmaring *ring)
{
        const struct b43_dma_ops *ops = ring->ops;
        int slot, current_slot;
        int used_slots = 0;

        B43_WARN_ON(ring->tx);
        current_slot = ops->get_current_rxslot(ring);
        B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
                update_max_used_slots(ring, ++used_slots);
        }
        ops->set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_suspend(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_resume(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
        b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
        b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
                           u16 mmio_base, bool enable)
{
        u32 ctl;

        if (type == B43_DMA_64BIT) {
                ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
                ctl &= ~B43_DMA64_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA64_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
        } else {
                ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
                ctl &= ~B43_DMA32_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA32_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
        }
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
                            unsigned int engine_index, bool enable)
{
        enum b43_dmatype type;
        u16 mmio_base;

        type = dma_mask_to_engine_type(supported_dma_mask(dev));

        mmio_base = b43_dmacontroller_base(type, engine_index);
        direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */