/*
        Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct data_queue *queue)
{
        struct sk_buff *skb;
        unsigned int frame_size;
        unsigned int reserved_size;

        /*
         * The frame size includes the descriptor size, because the
         * hardware receives the frame directly into the skbuffer.
         */
        frame_size = queue->data_size + queue->desc_size;

        /*
         * For the allocation we should keep a few things in mind:
         * 1) 4-byte alignment of the 802.11 payload
         *
         * For (1) we need at most 4 bytes to guarantee the correct
         * alignment. We optimize for the fact that header_size % 4 == 2
         * is by far the most likely case for 802.11 headers. However,
         * since we may need to move the frame up to 3 bytes to the
         * front, we preallocate 6 bytes.
         */
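        /*
         * For example: a QoS data header is 26 bytes long and
         * 26 % 4 == 2, so moving the frame 2 bytes to the front
         * makes the payload 4-byte aligned again.
         */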
        reserved_size = 6;

        /*
         * Allocate skbuffer.
         */
        skb = dev_alloc_skb(frame_size + reserved_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, reserved_size);
        skb_put(skb, frame_size);

        return skb;
}
EXPORT_SYMBOL_GPL(rt2x00queue_alloc_rxskb);

void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                      struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_rate *rate =
            ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
        const struct rt2x00_rate *hwrate;
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Initialize information from queue
         */
        txdesc->queue = entry->queue->qid;
        txdesc->cw_min = entry->queue->cw_min;
        txdesc->cw_max = entry->queue->cw_max;
        txdesc->aifs = entry->queue->aifs;

        /* The data length must be extended by 4 bytes for the CRC. */
        data_length = entry->skb->len + 4;

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame.
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.retry_limit;
        if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending.
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
                txdesc->ifs = IFS_SIFS;
        } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
                txdesc->ifs = IFS_BACKOFF;
        } else {
                txdesc->ifs = IFS_SIFS;
        }

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        hwrate = rt2x00_get_rate(rate->hw_value);
        txdesc->signal = hwrate->plcp;
        txdesc->service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

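                /*
                 * For OFDM rates the PLCP LENGTH field is 12 bits
                 * wide: length_high carries bits 11:6 of the frame
                 * length and length_low carries bits 5:0.
                 */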
                txdesc->length_high = (data_length >> 6) & 0x3f;
                txdesc->length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = get_duration_res(data_length, hwrate->bitrate);
                duration = get_duration(data_length, hwrate->bitrate);
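                /*
                 * Worked example, assuming get_duration() computes
                 * (size * 8 * 10) / bitrate with the bitrate given in
                 * 100kbit/s units: a 1500 byte frame at 11 Mbps
                 * (bitrate == 110) gives 120000 / 110 = 1090 us with
                 * a residual of 100; the duration is then rounded up
                 * to 1091 us below, and since the residual exceeds 30
                 * no length extension is needed.
                 */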

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension.
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->service |= 0x80;
                }

                txdesc->length_high = (duration >> 8) & 0xff;
                txdesc->length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (rt2x00_get_rate_preamble(rate->hw_value))
                        txdesc->signal |= 0x08;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);

void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

        rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

        /*
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick the beacon queue.
         * 2) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments and RTS or CTS-to-self frames.
         * 3) Rule 2 can be broken when the number of available entries
         *    in the queue drops below a certain threshold.
         */
        if (entry->queue->qid == QID_BEACON)
                return;

        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;

        if (unlikely(rt2x00queue_full(queue)))
                return -EINVAL;

        if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
                __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                return -EIO;
        }

        __set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);

        return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
                                         const enum data_queue_qid queue)
{
        int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
                return &rt2x00dev->tx[queue];

        if (!rt2x00dev->bcn)
                return NULL;

        if (queue == QID_BEACON)
                return &rt2x00dev->bcn[0];
        else if (queue == QID_ATIM && atim)
                return &rt2x00dev->bcn[1];

        return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->lock, irqflags);
}
EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;

        spin_lock_irqsave(&queue->lock, irqflags);

        queue->count = 0;
        queue->length = 0;
        memset(queue->index, 0, sizeof(queue->index));

        spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue = rt2x00dev->rx;
        unsigned int i;

        rt2x00queue_reset(queue);

        if (!rt2x00dev->ops->lib->init_rxentry)
                return;

        for (i = 0; i < queue->limit; i++)
                rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
                                                  &queue->entries[i]);
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        txall_queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                if (!rt2x00dev->ops->lib->init_txentry)
                        continue;

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->init_txentry(rt2x00dev,
                                                          &queue->entries[i]);
        }
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
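        /*
         * The kick threshold is roughly 10% of the queue size (rounded
         * up); it backs rule 3 in rt2x00queue_write_tx_descriptor(),
         * which kicks a bursting queue early once the available
         * entries drop below this value.
         */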
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        ( ((char *)(__base)) + ((__limit) * (__esize)) + \
          ((__index) * (__psize)) )
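        /*
         * Layout of the allocation addressed by the macro above: the
         * queue_entry array occupies the first (__limit * __esize)
         * bytes, followed by __limit driver-private areas of __psize
         * bytes each, one per entry.
         */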

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
                return 0;

        status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
                                           rt2x00dev->ops->atim);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        spin_lock_init(&queue->lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_BE + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_BE;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}