blob: 5dd9cca3c62cf72a3cf0ce48ab9772dfa6f85ef6 [file] [log] [blame]
Ivo van Doorn181d6902008-02-05 16:42:23 -05001/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00
23 Abstract: rt2x00 queue datastructures and routines
24 */
25
26#ifndef RT2X00QUEUE_H
27#define RT2X00QUEUE_H
28
29#include <linux/prefetch.h>
30
/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes,
 * for USB devices this restriction does not apply, but the value of
 * 2432 makes sense since it is big enough to contain the maximum fragment
 * size according to the ieee802.11 specs.
 */
#define DATA_FRAME_SIZE	2432
#define MGMT_FRAME_SIZE	256

/**
 * DOC: Number of entries per queue
 *
 * Under normal load without fragmentation 12 entries are sufficient
 * without the queue being filled up to the maximum. When using fragmentation
 * and the queue threshold code we need to add some additional margins to
 * make sure the queue will never (or only under extreme load) fill up
 * completely.
 * Since we don't use preallocated DMA having a large number of queue entries
 * will have only minimal impact on the memory requirements for the queue.
 */
#define RX_ENTRIES	24
#define TX_ENTRIES	24
#define BEACON_ENTRIES	1
#define ATIM_ENTRIES	8
Ivo van Doorn181d6902008-02-05 16:42:23 -050057
/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_VO: AC VO queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_BE = 0,
	QID_AC_BK = 1,
	QID_AC_VI = 2,
	QID_AC_VO = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};
84
/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = (1 << 0),
	SKBDESC_DMA_MAPPED_TX = (1 << 1),
};
Ivo van Doornbaf26a72008-02-17 17:32:08 +010095
/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, this means that
 * this structure should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	unsigned int flags;

	unsigned int desc_len;
	void *desc;

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};
120
/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 *
 * The descriptor lives inside the mac80211 TX info driver_data array;
 * the BUILD_BUG_ON proves at compile time that it actually fits there.
 */
static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}
131
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Does the signal field contain the plcp value,
 *	or does it contain the bitrate itself.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = 1 << 0,
	RXDONE_MY_BSS = 1 << 1,
};
143
/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 */
struct rxdone_entry_desc {
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
};
163
/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent
 * @TXDONE_FAILURE: Frame was not successfully sent
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN = 1 << 0,
	TXDONE_SUCCESS = 1 << 1,
	TXDONE_FAILURE = 1 << 2,
	TXDONE_EXCESSIVE_RETRY = 1 << 3,
};
179
/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};
193
/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_OFDM_RATE,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
};
218
/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @queue: Queue identification (See &enum data_queue_qid).
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @retry_limit: Max number of retries.
 * @aifs: AIFS value.
 * @ifs: IFS value.
 * @cw_min: cwmin value.
 * @cw_max: cwmax value.
 */
struct txentry_desc {
	unsigned long flags;

	enum data_queue_qid queue;

	u16 length_high;
	u16 length_low;
	u16 signal;
	u16 service;

	short retry_limit;
	short aifs;
	short ifs;
	short cw_min;
	short cw_max;
};
252
/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
 *	encryption or decryption. The entry should only be touched after
 *	the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_OWNER_DEVICE_CRYPTO,
	ENTRY_DATA_PENDING,
};
274
/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};
297
/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decryption
 *	will be completed by the hardware next.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DONE,
	Q_INDEX_CRYPTO,
	Q_INDEX_MAX,
};
317
/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00_dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @lock: Spinlock to protect index handling. Whenever the @index array
 *	needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;

	spinlock_t lock;
	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned short desc_size;
};
360
/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
	unsigned short entry_num;
	unsigned short data_size;
	unsigned short desc_size;
	unsigned short priv_size;
};
378
/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	(&(__dev)->rx[(__dev)->data_queues])

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	(&(__dev)->tx[(__dev)->ops->tx_queues])

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 * The prefetch of the next queue is purely a performance hint.
 */
#define queue_loop(__entry, __start, __end) \
	for ((__entry) = (__start); \
	     prefetch(&(__entry)[1]), (__entry) != (__end); \
	     (__entry) = &(__entry)[1])

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
445
446/**
447 * rt2x00queue_empty - Check if the queue is empty.
448 * @queue: Queue to check if empty.
449 */
450static inline int rt2x00queue_empty(struct data_queue *queue)
451{
452 return queue->length == 0;
453}
454
455/**
456 * rt2x00queue_full - Check if the queue is full.
457 * @queue: Queue to check if full.
458 */
459static inline int rt2x00queue_full(struct data_queue *queue)
460{
461 return queue->length == queue->limit;
462}
463
/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}
472
473/**
Ivo van Doornb8697672008-06-06 22:53:14 +0200474 * rt2x00queue_threshold - Check if the queue is below threshold
475 * @queue: Queue to check.
476 */
477static inline int rt2x00queue_threshold(struct data_queue *queue)
478{
479 return rt2x00queue_available(queue) < queue->threshold;
480}
481
482/**
Ivo van Doorn181d6902008-02-05 16:42:23 -0500483 * rt2x00_desc_read - Read a word from the hardware descriptor.
484 * @desc: Base descriptor address
485 * @word: Word index from where the descriptor should be read.
486 * @value: Address where the descriptor value should be written into.
487 */
488static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
489{
490 *value = le32_to_cpu(desc[word]);
491}
492
493/**
494 * rt2x00_desc_write - wrote a word to the hardware descriptor.
495 * @desc: Base descriptor address
496 * @word: Word index from where the descriptor should be written.
497 * @value: Value that should be written into the descriptor.
498 */
499static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
500{
501 desc[word] = cpu_to_le32(value);
502}
503
504#endif /* RT2X00QUEUE_H */