/*
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00
	Abstract: rt2x00 queue data structures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 still makes sense since it is big enough to contain the maximum
 * fragment size according to the IEEE 802.11 specification.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432
#define MGMT_FRAME_SIZE		256
#define AGGREGATION_SIZE	3840

/**
 * DOC: Number of entries per queue
 *
 * Under normal load without fragmentation, 12 entries are sufficient
 * without the queue being filled up to the maximum. When using fragmentation
 * and the queue threshold code, we need to add some additional margin to
 * make sure the queue will never (or only under extreme load) fill up
 * completely.
 * Since we don't use preallocated DMA, having a large number of queue entries
 * will have minimal impact on the memory requirements for the queue.
 */
#define RX_ENTRIES	24
#define TX_ENTRIES	24
#define BEACON_ENTRIES	1
#define ATIM_ENTRIES	8

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_VO: AC VO queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: ATIM queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_BE = 0,
	QID_AC_BK = 1,
	QID_AC_VI = 2,
	QID_AC_VO = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};

/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *	mac80211, but it was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX	= 1 << 0,
	SKBDESC_DMA_MAPPED_TX	= 1 << 1,
	SKBDESC_IV_STRIPPED	= 1 << 2,
	SKBDESC_NOT_MAC80211	= 1 << 3,
	SKBDESC_DESC_IN_SKB	= 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, which means that
 * this structure should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}

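/*
 * Usage sketch (illustrative, not part of the original header): after
 * allocating or receiving an skb, a driver typically resets the frame
 * descriptor and links it back to its queue entry. "entry" is assumed to
 * be the &struct queue_entry that owns the skb.
 *
 *	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 *
 *	memset(skbdesc, 0, sizeof(*skbdesc));
 *	skbdesc->entry = entry;
 */
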
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the PLCP value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the MCS value.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP	= BIT(0),
	RXDONE_SIGNAL_BITRATE	= BIT(1),
	RXDONE_SIGNAL_MCS	= BIT(2),
	RXDONE_MY_BSS		= BIT(3),
	RXDONE_CRYPTO_IV	= BIT(4),
	RXDONE_CRYPTO_ICV	= BIT(5),
	RXDONE_L2PAD		= BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )

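/*
 * Usage sketch (illustrative, not part of the original header): the RX-done
 * path can use the mask to decide how to interpret the signal field of a
 * filled &struct rxdone_entry_desc.
 *
 *	switch (rxdesc->dev_flags & RXDONE_SIGNAL_MASK) {
 *	case RXDONE_SIGNAL_PLCP:
 *		// signal contains the raw PLCP value
 *		break;
 *	case RXDONE_SIGNAL_BITRATE:
 *		// signal contains a bitrate value
 *		break;
 *	case RXDONE_SIGNAL_MCS:
 *		// signal contains an MCS index
 *		break;
 *	}
 */
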
/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};

/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent.
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries.
 * @TXDONE_FAILURE: Frame was not successfully sent.
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};

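/*
 * Reporting sketch (illustrative, not part of the original header): a TX
 * status handler could fill the descriptor while honouring the flag rules
 * documented above. "success", "fallback" and "retry" are assumed to come
 * from the hardware TX status registers.
 *
 *	struct txdone_entry_desc txdesc;
 *
 *	txdesc.flags = 0;
 *	__set_bit(success ? TXDONE_SUCCESS : TXDONE_FAILURE, &txdesc.flags);
 *	if (fallback && retry > 0)
 *		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
 *	txdesc.retry = retry;
 */
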
/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires a sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
	ENTRY_TXD_HT_MIMO_PS,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @qid: Queue identification (See &enum data_queue_qid).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: STBC.
 * @ba_size: BA size.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @aifs: AIFS value.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
 * @cw_min: cwmin value.
 * @cw_max: cwmax value.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	enum data_queue_qid qid;

	u16 length;
	u16 header_length;

	u16 length_high;
	u16 length_low;
	u16 signal;
	u16 service;

	u16 mcs;
	u16 stbc;
	u16 ba_size;
	u16 rate_mode;
	u16 mpdu_density;

	short retry_limit;
	short aifs;
	short ifs;
	short txop;
	short cw_min;
	short cw_max;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};

/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *	while transferring the data to the hardware. No TX status report will
 *	be expected from the hardware.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_DATA_PENDING,
	ENTRY_DATA_IO_FAILED
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};

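/*
 * Ownership sketch (illustrative, not part of the original header): before
 * handing an entry to the hardware the driver marks it as device owned, and
 * the completion path clears that mark again before touching the skb.
 * "complete_entry" is a hypothetical helper.
 *
 *	set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 *	// ...DMA transfer or URB completes...
 *	if (test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 *		complete_entry(entry);
 */
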
/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DONE,
	Q_INDEX_MAX,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @lock: Spinlock to protect index handling. Whenever the @index fields or
 *	the @last_index timestamps need to be changed this lock should be
 *	grabbed to prevent index corruption due to concurrency.
 * @last_index: Timestamp (jiffies) of the last Q_INDEX update.
 * @last_index_done: Timestamp (jiffies) of the last Q_INDEX_DONE update,
 *	compared against @last_index by &rt2x00queue_timeout.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;

	spinlock_t lock;
	unsigned long last_index;
	unsigned long last_index_done;
	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned short desc_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};

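/*
 * Index handling sketch (illustrative, not part of the original header; the
 * real implementation lives in rt2x00queue.c): advancing an index wraps at
 * @limit, updates the bookkeeping fields and records a timestamp for the
 * watchdog check in rt2x00queue_timeout().
 *
 *	unsigned long irqflags;
 *
 *	spin_lock_irqsave(&queue->lock, irqflags);
 *
 *	queue->index[Q_INDEX]++;
 *	if (queue->index[Q_INDEX] >= queue->limit)
 *		queue->index[Q_INDEX] = 0;
 *	queue->length++;
 *	queue->last_index = jiffies;
 *
 *	spin_unlock_irqrestore(&queue->lock, irqflags);
 */
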
/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
	unsigned short entry_num;
	unsigned short data_size;
	unsigned short desc_size;
	unsigned short priv_size;
};

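/*
 * Sketch (illustrative, not part of the original header): a driver would
 * typically describe its RX queue by combining the entry and frame size
 * constants above with its own descriptor layout. "EXAMPLE_RXD_DESC_SIZE"
 * and "struct example_entry_priv" are placeholders for driver specifics.
 *
 *	static const struct data_queue_desc example_queue_rx = {
 *		.entry_num	= RX_ENTRIES,
 *		.data_size	= DATA_FRAME_SIZE,
 *		.desc_size	= EXAMPLE_RXD_DESC_SIZE,
 *		.priv_size	= sizeof(struct example_entry_priv),
 *	};
 */
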
/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and ATIM queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and ATIM queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))

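/*
 * Iteration sketch (illustrative, not part of the original header): walking
 * all regular TX queues of a device, skipping the ones that are currently
 * empty. "rt2x00dev" is assumed to be a valid &struct rt2x00_dev pointer and
 * "kick_queue" a hypothetical helper.
 *
 *	struct data_queue *queue;
 *
 *	tx_queue_for_each(rt2x00dev, queue) {
 *		if (!rt2x00queue_empty(queue))
 *			kick_queue(queue);
 *	}
 */
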
/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start pointer
 * and will walk through the queue until it reaches the @end pointer.
 */
void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry));

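/*
 * Callback sketch (illustrative, not part of the original header): pushing
 * every pending entry between the Q_INDEX_DONE and Q_INDEX pointers to the
 * hardware. "example_kick_entry" is a hypothetical callback.
 *
 *	static void example_kick_entry(struct queue_entry *entry)
 *	{
 *		if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
 *			return;
 *
 *		set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 *		// ...start the actual transfer for this entry...
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   example_kick_entry);
 */
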
/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
	return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
	return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	return rt2x00queue_available(queue) < queue->threshold;
}

/**
 * rt2x00queue_timeout - Check if a timeout occurred for this queue
 * @queue: Queue to check.
 */
static inline int rt2x00queue_timeout(struct data_queue *queue)
{
	return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
}

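/*
 * Usage sketch (illustrative, not part of the original header): the TX path
 * typically checks the threshold after queueing a frame and asks mac80211 to
 * stop feeding that queue until entries have been completed. "qid" is
 * assumed to be the mac80211 queue number matching this data queue.
 *
 *	if (rt2x00queue_threshold(queue))
 *		ieee80211_stop_queue(rt2x00dev->hw, qid);
 */
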
/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
{
	*value = desc[word];
}

/**
 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
{
	__le32 tmp;
	_rt2x00_desc_read(desc, word, &tmp);
	*value = le32_to_cpu(tmp);
}

/**
 * _rt2x00_desc_write - Write a word to the hardware descriptor without
 * taking care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to which the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
	desc[word] = value;
}

/**
 * rt2x00_desc_write - Write a word to the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to which the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
}

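/*
 * Usage sketch (illustrative, not part of the original header): drivers read
 * a descriptor word into CPU byte order, modify it, and write it back in
 * little-endian order. "TXD_W0_VALID" is a hypothetical field mask and "txd"
 * a pointer to the hardware TX descriptor.
 *
 *	u32 word;
 *
 *	rt2x00_desc_read(txd, 0, &word);
 *	word |= TXD_W0_VALID;
 *	rt2x00_desc_write(txd, 0, word);
 */
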
#endif /* RT2X00QUEUE_H */