/*

  Broadcom B43 wireless driver

  PIO data transfer

  Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "pio.h"
#include "dma.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

static u16 generate_cookie(struct b43_pio_txqueue *q,
                           struct b43_pio_txpacket *pack)
{
        u16 cookie;

        /* Use the upper 4 bits of the cookie as
         * PIO controller ID and store the packet index number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         * It can also not be 0xFFFF because that is special
         * for multicast frames.
         */
        cookie = (((u16)q->index + 1) << 12);
        cookie |= pack->index;

        return cookie;
}

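/* Decode a TX status cookie back into the queue and the packet slot it
 * refers to. This is the counterpart to generate_cookie(); for example,
 * queue index 2 (AC_VI) and packet slot 5 yield cookie 0x3005. */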
static
struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
                                     u16 cookie,
                                     struct b43_pio_txpacket **pack)
{
        struct b43_pio *pio = &dev->pio;
        struct b43_pio_txqueue *q = NULL;
        unsigned int pack_index;

        switch (cookie & 0xF000) {
        case 0x1000:
                q = pio->tx_queue_AC_BK;
                break;
        case 0x2000:
                q = pio->tx_queue_AC_BE;
                break;
        case 0x3000:
                q = pio->tx_queue_AC_VI;
                break;
        case 0x4000:
                q = pio->tx_queue_AC_VO;
                break;
        case 0x5000:
                q = pio->tx_queue_mcast;
                break;
        }
        if (B43_WARN_ON(!q))
                return NULL;
        pack_index = (cookie & 0x0FFF);
        if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
                return NULL;
        *pack = &q->packets[pack_index];

        return q;
}

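/* Look up the MMIO base address of the PIO engine with the given index.
 * Core revisions >= 11 use a different register layout with fewer engines. */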
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
                                  unsigned int index)
{
        static const u16 bases[] = {
                B43_MMIO_PIO_BASE0,
                B43_MMIO_PIO_BASE1,
                B43_MMIO_PIO_BASE2,
                B43_MMIO_PIO_BASE3,
                B43_MMIO_PIO_BASE4,
                B43_MMIO_PIO_BASE5,
                B43_MMIO_PIO_BASE6,
                B43_MMIO_PIO_BASE7,
        };
        static const u16 bases_rev11[] = {
                B43_MMIO_PIO11_BASE0,
                B43_MMIO_PIO11_BASE1,
                B43_MMIO_PIO11_BASE2,
                B43_MMIO_PIO11_BASE3,
                B43_MMIO_PIO11_BASE4,
                B43_MMIO_PIO11_BASE5,
        };

        if (dev->sdev->id.revision >= 11) {
                B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
                return bases_rev11[index];
        }
        B43_WARN_ON(index >= ARRAY_SIZE(bases));
        return bases[index];
}

static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
        if (dev->sdev->id.revision >= 11)
                return 0x18;
        return 0;
}

static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
        if (dev->sdev->id.revision >= 11)
                return 0x38;
        return 8;
}

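/* Allocate and initialize a TX queue: resolve its MMIO base, determine the
 * usable hardware buffer size and put all packet slots onto the free list. */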
static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
                                                     unsigned int index)
{
        struct b43_pio_txqueue *q;
        struct b43_pio_txpacket *p;
        unsigned int i;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;
        q->dev = dev;
        q->rev = dev->sdev->id.revision;
        q->mmio_base = index_to_pioqueue_base(dev, index) +
                       pio_txqueue_offset(dev);
        q->index = index;

        q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
        if (q->rev >= 8) {
                q->buffer_size = 1920; //FIXME this constant is wrong.
        } else {
                q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
                q->buffer_size -= 80;
        }

        INIT_LIST_HEAD(&q->packets_list);
        for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
                p = &(q->packets[i]);
                INIT_LIST_HEAD(&p->list);
                p->index = i;
                p->queue = q;
                list_add(&p->list, &q->packets_list);
        }

        return q;
}

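/* Allocate the RX queue and switch the corresponding engine into
 * direct FIFO RX (PIO) mode. */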
static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
                                                     unsigned int index)
{
        struct b43_pio_rxqueue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;
        q->dev = dev;
        q->rev = dev->sdev->id.revision;
        q->mmio_base = index_to_pioqueue_base(dev, index) +
                       pio_rxqueue_offset(dev);

        /* Enable Direct FIFO RX (PIO) on the engine. */
        b43_dma_direct_fifo_rx(dev, index, 1);

        return q;
}

static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
{
        struct b43_pio_txpacket *pack;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
                pack = &(q->packets[i]);
                if (pack->skb) {
                        dev_kfree_skb_any(pack->skb);
                        pack->skb = NULL;
                }
        }
}

static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
                                    const char *name)
{
        if (!q)
                return;
        b43_pio_cancel_tx_packets(q);
        kfree(q);
}

static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
                                    const char *name)
{
        if (!q)
                return;
        kfree(q);
}

#define destroy_queue_tx(pio, queue) do {                               \
        b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));      \
        (pio)->queue = NULL;                                            \
  } while (0)

#define destroy_queue_rx(pio, queue) do {                               \
        b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));      \
        (pio)->queue = NULL;                                            \
  } while (0)

void b43_pio_free(struct b43_wldev *dev)
{
        struct b43_pio *pio;

        if (!b43_using_pio_transfers(dev))
                return;
        pio = &dev->pio;

        destroy_queue_rx(pio, rx_queue);
        destroy_queue_tx(pio, tx_queue_mcast);
        destroy_queue_tx(pio, tx_queue_AC_VO);
        destroy_queue_tx(pio, tx_queue_AC_VI);
        destroy_queue_tx(pio, tx_queue_AC_BE);
        destroy_queue_tx(pio, tx_queue_AC_BK);
}

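/* Set up PIO transfers: one TX queue per 802.11 access category, the
 * multicast TX queue and the RX queue. On failure, everything that was
 * already allocated is torn down again. */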
int b43_pio_init(struct b43_wldev *dev)
{
        struct b43_pio *pio = &dev->pio;
        int err = -ENOMEM;

        b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
                    & ~B43_MACCTL_BE);
        b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

        pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
        if (!pio->tx_queue_AC_BK)
                goto out;

        pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
        if (!pio->tx_queue_AC_BE)
                goto err_destroy_bk;

        pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
        if (!pio->tx_queue_AC_VI)
                goto err_destroy_be;

        pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
        if (!pio->tx_queue_AC_VO)
                goto err_destroy_vi;

        pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
        if (!pio->tx_queue_mcast)
                goto err_destroy_vo;

        pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
        if (!pio->rx_queue)
                goto err_destroy_mcast;

        b43dbg(dev->wl, "PIO initialized\n");
        err = 0;
out:
        return err;

err_destroy_mcast:
        destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
        destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
        destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
        destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
        destroy_queue_tx(pio, tx_queue_AC_BK);
        return err;
}

/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
                                                        u8 queue_prio)
{
        struct b43_pio_txqueue *q;

        if (dev->qos_enabled) {
                /* 0 = highest priority */
                switch (queue_prio) {
                default:
                        B43_WARN_ON(1);
                        /* fallthrough */
                case 0:
                        q = dev->pio.tx_queue_AC_VO;
                        break;
                case 1:
                        q = dev->pio.tx_queue_AC_VI;
                        break;
                case 2:
                        q = dev->pio.tx_queue_AC_BE;
                        break;
                case 3:
                        q = dev->pio.tx_queue_AC_BK;
                        break;
                }
        } else
                q = dev->pio.tx_queue_AC_BE;

        return q;
}

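/* Push a data block through a 2-byte wide PIO queue. A trailing odd byte
 * is staged in wl->pio_tailspace and written with the WRITEHI bit cleared.
 * Returns the updated queue control word. */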
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
                                u16 ctl,
                                const void *_data,
                                unsigned int data_len)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        const u8 *data = _data;

        ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
        b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

        b43_block_write(dev, data, (data_len & ~1),
                        q->mmio_base + B43_PIO_TXDATA,
                        sizeof(u16));
        if (data_len & 1) {
                u8 *tail = wl->pio_tailspace;
                BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

                /* Write the last byte. */
                ctl &= ~B43_PIO_TXCTL_WRITEHI;
                b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
                tail[0] = data[data_len - 1];
                tail[1] = 0;
                b43_block_write(dev, tail, 2,
                                q->mmio_base + B43_PIO_TXDATA,
                                sizeof(u16));
        }

        return ctl;
}

static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
                                     const u8 *hdr, unsigned int hdrlen)
{
        struct b43_pio_txqueue *q = pack->queue;
        const char *frame = pack->skb->data;
        unsigned int frame_len = pack->skb->len;
        u16 ctl;

        ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
        ctl |= B43_PIO_TXCTL_FREADY;
        ctl &= ~B43_PIO_TXCTL_EOF;

        /* Transfer the header data. */
        ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
        /* Transfer the frame data. */
        ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

        ctl |= B43_PIO_TXCTL_EOF;
        b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}

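/* Push a data block through a 4-byte wide PIO queue (core rev >= 8).
 * The last one to three bytes are staged in wl->pio_tailspace and the
 * byte-enable bits in the control word are trimmed accordingly. */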
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
                                u32 ctl,
                                const void *_data,
                                unsigned int data_len)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        const u8 *data = _data;

        ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
               B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
        b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

        b43_block_write(dev, data, (data_len & ~3),
                        q->mmio_base + B43_PIO8_TXDATA,
                        sizeof(u32));
        if (data_len & 3) {
                u8 *tail = wl->pio_tailspace;
                BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

                memset(tail, 0, 4);
                /* Write the last few bytes. */
                ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
                         B43_PIO8_TXCTL_24_31);
                switch (data_len & 3) {
                case 3:
                        ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
                        tail[0] = data[data_len - 3];
                        tail[1] = data[data_len - 2];
                        tail[2] = data[data_len - 1];
                        break;
                case 2:
                        ctl |= B43_PIO8_TXCTL_8_15;
                        tail[0] = data[data_len - 2];
                        tail[1] = data[data_len - 1];
                        break;
                case 1:
                        tail[0] = data[data_len - 1];
                        break;
                }
                b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
                b43_block_write(dev, tail, 4,
                                q->mmio_base + B43_PIO8_TXDATA,
                                sizeof(u32));
        }

        return ctl;
}

static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
                                     const u8 *hdr, unsigned int hdrlen)
{
        struct b43_pio_txqueue *q = pack->queue;
        const char *frame = pack->skb->data;
        unsigned int frame_len = pack->skb->len;
        u32 ctl;

        ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
        ctl |= B43_PIO8_TXCTL_FREADY;
        ctl &= ~B43_PIO8_TXCTL_EOF;

        /* Transfer the header data. */
        ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
        /* Transfer the frame data. */
        ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);

        ctl |= B43_PIO8_TXCTL_EOF;
        b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
}

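/* Transmit one frame: build the TX header in wl->pio_scratchspace, feed
 * header and frame data into the hardware queue and account for the
 * consumed buffer space and packet slot. */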
static int pio_tx_frame(struct b43_pio_txqueue *q,
                        struct sk_buff *skb)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        struct b43_pio_txpacket *pack;
        u16 cookie;
        int err;
        unsigned int hdrlen;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

        B43_WARN_ON(list_empty(&q->packets_list));
        pack = list_entry(q->packets_list.next,
                          struct b43_pio_txpacket, list);

        cookie = generate_cookie(q, pack);
        hdrlen = b43_txhdr_size(dev);
        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
        B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
        err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
                                 info, cookie);
        if (err)
                return err;

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Tell the firmware about the cookie of the last
                 * mcast frame, so it can clear the more-data bit in it. */
                b43_shm_write16(dev, B43_SHM_SHARED,
                                B43_SHM_SH_MCASTCOOKIE, cookie);
        }

        pack->skb = skb;
        if (q->rev >= 8)
                pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
        else
                pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

        /* Remove it from the list of available packet slots.
         * It will be put back when we receive the status report. */
        list_del(&pack->list);

        /* Update the queue statistics. */
        q->buffer_used += roundup(skb->len + hdrlen, 4);
        q->free_packet_slots -= 1;

        return 0;
}

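/* Entry point for PIO frame transmission. Selects the target queue
 * (multicast after DTIM, otherwise by priority), checks for free buffer
 * space and stops the mac80211 queue when the PIO queue runs full. */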
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
        struct b43_pio_txqueue *q;
        struct ieee80211_hdr *hdr;
        unsigned int hdrlen, total_len;
        int err = 0;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        hdr = (struct ieee80211_hdr *)skb->data;

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* The multicast queue will be sent after the DTIM. */
                q = dev->pio.tx_queue_mcast;
                /* Set the frame More-Data bit. Ucode will clear it
                 * for us on the last frame. */
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else {
                /* Decide by priority where to put this frame. */
                q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
        }

        hdrlen = b43_txhdr_size(dev);
        total_len = roundup(skb->len + hdrlen, 4);

        if (unlikely(total_len > q->buffer_size)) {
                err = -ENOBUFS;
                b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
                goto out;
        }
        if (unlikely(q->free_packet_slots == 0)) {
                err = -ENOBUFS;
                b43warn(dev->wl, "PIO: TX packet overflow.\n");
                goto out;
        }
        B43_WARN_ON(q->buffer_used > q->buffer_size);

        if (total_len > (q->buffer_size - q->buffer_used)) {
                /* Not enough memory on the queue. */
                err = -EBUSY;
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                q->stopped = 1;
                goto out;
        }

        /* Assign the queue number to the ring (if not already done before)
         * so TX status handling can use it. The mac80211-queue to b43-queue
         * mapping is static, so we don't need to store it per frame. */
        q->queue_prio = skb_get_queue_mapping(skb);

        err = pio_tx_frame(q, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we don't have the encryption key
                 * anymore and must not transmit it unencrypted. */
                dev_kfree_skb_any(skb);
                err = 0;
                goto out;
        }
        if (unlikely(err)) {
                b43err(dev->wl, "PIO transmission failure\n");
                goto out;
        }

        B43_WARN_ON(q->buffer_used > q->buffer_size);
        if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
            (q->free_packet_slots == 0)) {
                /* The queue is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                q->stopped = 1;
        }

out:
        return err;
}

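/* Handle a TX status report: look up the packet by its cookie, report the
 * status to mac80211, return the packet slot and buffer space to the queue
 * and wake the queue if it was stopped. */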
void b43_pio_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
        struct b43_pio_txqueue *q;
        struct b43_pio_txpacket *pack = NULL;
        unsigned int total_len;
        struct ieee80211_tx_info *info;

        q = parse_cookie(dev, status->cookie, &pack);
        if (unlikely(!q))
                return;
        B43_WARN_ON(!pack);

        info = IEEE80211_SKB_CB(pack->skb);

        b43_fill_txstatus_report(dev, info, status);

        total_len = pack->skb->len + b43_txhdr_size(dev);
        total_len = roundup(total_len, 4);
        q->buffer_used -= total_len;
        q->free_packet_slots += 1;

        ieee80211_tx_status(dev->wl->hw, pack->skb);
        pack->skb = NULL;
        list_add(&pack->list, &q->packets_list);

        if (q->stopped) {
                ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
                q->stopped = 0;
        }
}

/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
        struct b43_wldev *dev = q->dev;
        struct b43_wl *wl = dev->wl;
        u16 len;
        u32 macstat;
        unsigned int i, padding;
        struct sk_buff *skb;
        const char *err_msg = NULL;
        struct b43_rxhdr_fw4 *rxhdr =
                (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;

        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
        memset(rxhdr, 0, sizeof(*rxhdr));

        /* Check if we have data and wait for it to get ready. */
        if (q->rev >= 8) {
                u32 ctl;

                ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
                if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
                        return 0;
                b43_piorx_write32(q, B43_PIO8_RXCTL,
                                  B43_PIO8_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
                        ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
                        if (ctl & B43_PIO8_RXCTL_DATARDY)
                                goto data_ready;
                        udelay(10);
                }
        } else {
                u16 ctl;

                ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
                if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
                        return 0;
                b43_piorx_write16(q, B43_PIO_RXCTL,
                                  B43_PIO_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
                        ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
                        if (ctl & B43_PIO_RXCTL_DATARDY)
                                goto data_ready;
                        udelay(10);
                }
        }
        b43dbg(q->dev->wl, "PIO RX timed out\n");
        return 1;
data_ready:

        /* Get the preamble (RX header) */
        if (q->rev >= 8) {
                b43_block_read(dev, rxhdr, sizeof(*rxhdr),
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
        } else {
                b43_block_read(dev, rxhdr, sizeof(*rxhdr),
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
        }
        /* Sanity checks. */
        len = le16_to_cpu(rxhdr->frame_len);
        if (unlikely(len > 0x700)) {
                err_msg = "len > 0x700";
                goto rx_error;
        }
        if (unlikely(len == 0)) {
                err_msg = "len == 0";
                goto rx_error;
        }

        macstat = le32_to_cpu(rxhdr->mac_status);
        if (macstat & B43_RX_MAC_FCSERR) {
                if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
                        /* Drop frames with failed FCS. */
                        err_msg = "Frame FCS error";
                        goto rx_error;
                }
        }

        /* We always pad 2 bytes, as that's what upstream code expects
         * due to the RX-header being 30 bytes. In case the frame is
         * unaligned, we pad another 2 bytes. */
        padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
        skb = dev_alloc_skb(len + padding + 2);
        if (unlikely(!skb)) {
                err_msg = "Out of memory";
                goto rx_error;
        }
        skb_reserve(skb, 2);
        skb_put(skb, len + padding);
        if (q->rev >= 8) {
                b43_block_read(dev, skb->data + padding, (len & ~3),
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
                if (len & 3) {
                        u8 *tail = wl->pio_tailspace;
                        BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

                        /* Read the last few bytes. */
                        b43_block_read(dev, tail, 4,
                                       q->mmio_base + B43_PIO8_RXDATA,
                                       sizeof(u32));
                        switch (len & 3) {
                        case 3:
                                skb->data[len + padding - 3] = tail[0];
                                skb->data[len + padding - 2] = tail[1];
                                skb->data[len + padding - 1] = tail[2];
                                break;
                        case 2:
                                skb->data[len + padding - 2] = tail[0];
                                skb->data[len + padding - 1] = tail[1];
                                break;
                        case 1:
                                skb->data[len + padding - 1] = tail[0];
                                break;
                        }
                }
        } else {
                b43_block_read(dev, skb->data + padding, (len & ~1),
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
                if (len & 1) {
                        u8 *tail = wl->pio_tailspace;
                        BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

                        /* Read the last byte. */
                        b43_block_read(dev, tail, 2,
                                       q->mmio_base + B43_PIO_RXDATA,
                                       sizeof(u16));
                        skb->data[len + padding - 1] = tail[0];
                }
        }

        b43_rx(q->dev, skb, rxhdr);

        return 1;

rx_error:
        if (err_msg)
                b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
        if (q->rev >= 8)
                b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
        else
                b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

        return 1;
}

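/* Drain the RX FIFO. pio_rx_frame() returns 0 once no further frame is
 * pending; the counter bails out after 10000 frames as a safeguard. */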
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
        unsigned int count = 0;
        bool stop;

        while (1) {
                stop = (pio_rx_frame(q) == 0);
                if (stop)
                        break;
                cond_resched();
                if (WARN_ON_ONCE(++count > 10000))
                        break;
        }
}

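/* Request suspension of a single hardware TX queue by setting its
 * SUSPREQ bit. */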
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
        if (q->rev >= 8) {
                b43_piotx_write32(q, B43_PIO8_TXCTL,
                                  b43_piotx_read32(q, B43_PIO8_TXCTL)
                                  | B43_PIO8_TXCTL_SUSPREQ);
        } else {
                b43_piotx_write16(q, B43_PIO_TXCTL,
                                  b43_piotx_read16(q, B43_PIO_TXCTL)
                                  | B43_PIO_TXCTL_SUSPREQ);
        }
}

static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
        if (q->rev >= 8) {
                b43_piotx_write32(q, B43_PIO8_TXCTL,
                                  b43_piotx_read32(q, B43_PIO8_TXCTL)
                                  & ~B43_PIO8_TXCTL_SUSPREQ);
        } else {
                b43_piotx_write16(q, B43_PIO_TXCTL,
                                  b43_piotx_read16(q, B43_PIO_TXCTL)
                                  & ~B43_PIO_TXCTL_SUSPREQ);
        }
}

void b43_pio_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
        b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}

void b43_pio_tx_resume(struct b43_wldev *dev)
{
        b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
        b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
        b43_power_saving_ctl_bits(dev, 0);
}