blob: 1bd1a952e42c3d6c5eaf376a210a30e7056abb40 [file] [log] [blame]
Ivo van Doorn181d6902008-02-05 16:42:23 -05001/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00
23 Abstract: rt2x00 queue datastructures and routines
24 */
25
26#ifndef RT2X00QUEUE_H
27#define RT2X00QUEUE_H
28
29#include <linux/prefetch.h>
30
/**
 * DOC: Entire frame size
 *
 * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes,
 * for USB devices this restriction does not apply, but the value of
 * 2432 makes sense since it is big enough to contain the maximum fragment
 * size according to the ieee802.11 specs.
 */
39#define DATA_FRAME_SIZE 2432
40#define MGMT_FRAME_SIZE 256
41
/**
 * DOC: Number of entries per queue
 *
 * Under normal load without fragmentation, 12 entries are sufficient
 * without the queue being filled up to the maximum. When using fragmentation
 * and the queue threshold code, we need to add some additional margins to
 * make sure the queue will never (or only under extreme load) fill up
 * completely.
 * Since we don't use preallocated DMA, having a large number of queue entries
 * will have only minimal impact on the memory requirements for the queue.
 */
Ivo van Doornf5299322008-06-16 19:57:40 +020053#define RX_ENTRIES 24
54#define TX_ENTRIES 24
Ivo van Doorn181d6902008-02-05 16:42:23 -050055#define BEACON_ENTRIES 1
Ivo van Doornf5299322008-06-16 19:57:40 +020056#define ATIM_ENTRIES 8
Ivo van Doorn181d6902008-02-05 16:42:23 -050057
/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_VO: AC VO queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_BE = 0,
	QID_AC_BK = 1,
	QID_AC_VI = 2,
	QID_AC_VO = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	/* The following values are host-only; never written to hardware. */
	QID_BEACON,
	QID_ATIM,
};
84
/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @FRAME_DESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *	mac80211 but it was stripped for processing by the driver.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	FRAME_DESC_IV_STRIPPED = 1 << 2,
};
Ivo van Doornbaf26a72008-02-17 17:32:08 +010098
/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, this means that
 * this structure should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};
131
Johannes Berge039fa42008-05-15 12:55:29 +0200132/**
133 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
134 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
135 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500136static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
137{
Johannes Berge039fa42008-05-15 12:55:29 +0200138 BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
139 IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
140 return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
Ivo van Doorn181d6902008-02-05 16:42:23 -0500141}
142
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = 1 << 0,
	RXDONE_SIGNAL_BITRATE = 1 << 1,
	RXDONE_MY_BSS = 1 << 2,
	RXDONE_CRYPTO_IV = 1 << 3,
	RXDONE_CRYPTO_ICV = 1 << 4,
};
159
/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @noise: Measured noise during frame reception.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int noise;
	int size;
	int flags;
	int dev_flags;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};
191
/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent.
 * @TXDONE_FAILURE: Frame was not successfully sent.
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
};
207
/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};
221
/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
};
254
/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @queue: Queue identification (See &enum data_queue_qid).
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @retry_limit: Max number of retries.
 * @aifs: AIFS value.
 * @ifs: IFS value.
 * @cw_min: cwmin value.
 * @cw_max: cwmax value.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 */
struct txentry_desc {
	unsigned long flags;

	enum data_queue_qid queue;

	u16 length_high;
	u16 length_low;
	u16 signal;
	u16 service;

	u16 rate_mode;

	short retry_limit;
	short aifs;
	short ifs;
	short cw_min;
	short cw_max;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
};
298
/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
 *	encryption or decryption. The entry should only be touched after
 *	the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_OWNER_DEVICE_CRYPTO,
	ENTRY_DATA_PENDING,
};
320
/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};
343
/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decryption
 *	will be completed by the hardware next.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DONE,
	Q_INDEX_CRYPTO,
	Q_INDEX_MAX,
};
363
/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00_dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @lock: Spinlock to protect index handling. Whenever any entry of the
 *	@index array (see &enum queue_index) needs to be changed this lock
 *	should be grabbed to prevent index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;

	spinlock_t lock;
	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned short desc_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};
413
/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
	unsigned short entry_num;
	unsigned short data_size;
	unsigned short desc_size;
	unsigned short priv_size;
};
431
/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 * Note: macro argument is evaluated twice.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 * Note: macro argument is evaluated twice.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]
466
/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 * The next queue is prefetched in the loop condition to hide memory
 * latency while the current queue is being processed.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
511
512/**
513 * rt2x00queue_empty - Check if the queue is empty.
514 * @queue: Queue to check if empty.
515 */
516static inline int rt2x00queue_empty(struct data_queue *queue)
517{
518 return queue->length == 0;
519}
520
521/**
522 * rt2x00queue_full - Check if the queue is full.
523 * @queue: Queue to check if full.
524 */
525static inline int rt2x00queue_full(struct data_queue *queue)
526{
527 return queue->length == queue->limit;
528}
529
/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 *
 * Returns the number of free entries (limit minus current length).
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}
538
539/**
Ivo van Doornb8697672008-06-06 22:53:14 +0200540 * rt2x00queue_threshold - Check if the queue is below threshold
541 * @queue: Queue to check.
542 */
543static inline int rt2x00queue_threshold(struct data_queue *queue)
544{
545 return rt2x00queue_available(queue) < queue->threshold;
546}
547
/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 *
 * Raw access: the value is returned in little-endian form, exactly as
 * stored in the descriptor (no byte-order conversion is performed).
 */
static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
{
	*value = desc[word];
}
558
/**
 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 *
 * The descriptor word is converted from little-endian to CPU byte order.
 */
static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
{
	__le32 tmp;
	_rt2x00_desc_read(desc, word, &tmp);
	*value = le32_to_cpu(tmp);
}
572
/**
 * _rt2x00_desc_write - write a word to the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 *
 * Raw access: @value must already be in little-endian form; no
 * byte-order conversion is performed here (see rt2x00_desc_write()).
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
	desc[word] = value;
}
584
/**
 * rt2x00_desc_write - write a word to the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 *
 * The CPU-order @value is converted to little-endian before storing.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
}
595
596#endif /* RT2X00QUEUE_H */