/*
	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at least 3 bytes of headroom for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}
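
	/*
	 * Example (with hypothetical queue sizes): for data_size 2432 and
	 * desc_size 32 on a device with hardware crypto support, this
	 * allocates 2464 + 12 + 8 bytes, where the 12 bytes of headroom
	 * cover alignment plus IV/EIV and the 8 tail bytes the ICV.
	 */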

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the headroom and set the frame length, so that the
	 * requested number of bytes remain available at the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure
	 * that it is also mapped for DMA so it can be used for
	 * transferring additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore the data pointer to its original location.
	 */
	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
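
/*
 * Note that the skb_push()/skb_pull() pair above moves skb->data only
 * for the duration of the mapping call: afterwards skb->len again
 * covers just the frame, even though the headroom is part of the DMA
 * mapping. This is why rt2x00queue_unmap_skb() below must re-add
 * extra_tx_headroom when unmapping a TX frame.
 */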

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add the headroom back to the skb length: it has been
		 * removed by the driver, but it was part of the DMA mapping.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->hw->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_payload_align(struct sk_buff *skb,
			       bool l2pad, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	if (l2pad) {
		if (skbdesc->flags & SKBDESC_L2_PADDED) {
			/* Remove L2 padding */
			memmove(skb->data + align, skb->data, header_length);
			skb_pull(skb, align);
			skbdesc->flags &= ~SKBDESC_L2_PADDED;
		} else {
			/* Add L2 padding */
			skb_push(skb, align);
			memmove(skb->data, skb->data + align, header_length);
			skbdesc->flags |= SKBDESC_L2_PADDED;
		}
	} else {
		/* Generic payload alignment to 4-byte boundary */
		skb_push(skb, align);
		memmove(skb->data, skb->data + align, frame_length);
	}
}
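
/*
 * Example: an 802.11 QoS data header is 26 bytes, so for a 4-byte
 * aligned buffer ALIGN_SIZE() yields 2. Adding L2 padding pushes the
 * header forward by those 2 bytes, leaving a gap between header and
 * payload so that the payload itself starts on a 4-byte boundary.
 */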

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
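
	/*
	 * The IEEE 802.11 Sequence Control field carries the fragment
	 * number in bits 0-3 and the sequence number in bits 4-15.
	 * Stepping the counter by 0x10 therefore increments the sequence
	 * number by one, while the masking below preserves the fragment
	 * number of the frame itself.
	 */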
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}
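
		/*
		 * Worked example for the length-extension test above,
		 * assuming GET_DURATION_RES() is ((size * 8 * 10) % bitrate)
		 * as defined in rt2x00lib.h: at 11 Mbps (bitrate == 110) the
		 * residual is a multiple of 10, and values 10..30 correspond
		 * to (bits mod 11) of 1..3 -- exactly the case where 802.11b
		 * requires the CCK Length Extension bit to disambiguate the
		 * rounded-up LENGTH value.
		 */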

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When a short preamble is used we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and alignment information.
	 */
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
	txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
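	/*
	 * mac80211's rates[0].count is the total number of transmission
	 * attempts including the first one, so the retry limit is
	 * count - 1.
	 */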
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed; it is now
	 * ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 * 1) Don't kick the beacon queue.
	 * 2) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 3) Rule 2 can be broken when the number of available entries
	 *    in the queue drops below a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc; after that we
	 * are free to use the skb->cb array for our own information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information has been retrieved from the skb->cb array;
	 * now we can claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * Aligning the header to this boundary can be done by calling
	 * rt2x00queue_payload_align with a header length of 0.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_payload_align(entry->skb, true,
					  txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_payload_align(entry->skb, false, 0);

	/*
	 * It is possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc; after that we
	 * are free to use the skb->cb array for our own information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from which the
	 * driver can move it to the location required by the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write the TX descriptor into the reserved room in front
	 * of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send beacon to hardware.
	 * Also enable beacon generation, which might have been disabled
	 * by the driver during the config_beacon() callback function.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

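	/*
	 * Bookkeeping: Q_INDEX advances when a frame is queued and
	 * Q_INDEX_DONE when the device has finished with it, so 'length'
	 * tracks the number of frames currently pending in the queue
	 * and 'count' the total number of completed frames.
	 */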
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
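	/*
	 * The threshold is set at roughly 10% of the queue size; once
	 * fewer free entries remain, rt2x00queue_threshold() lets
	 * rt2x00queue_write_tx_descriptor() kick the queue early even
	 * in the middle of a burst.
	 */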
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )
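
	/*
	 * Layout of the kzalloc() block above, e.g. for limit == 2:
	 * [entry 0][entry 1][priv 0][priv 1] -- all queue_entry
	 * structures first, then the per-entry driver private data,
	 * which the macro locates at base + limit * entry size +
	 * index * priv size.
	 */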

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
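
	/*
	 * For example, with ops->tx_queues == 4 and an ATIM queue the
	 * single allocation is laid out as
	 * [RX][AC_BE][AC_BK][AC_VI][AC_VO][BEACON][ATIM]
	 * (TX qids assigned in enum data_queue_qid order below), so
	 * rx, tx and bcn all point into the same array.
	 */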

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}