/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

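/*
 * Queue a TID for transmission: add the TID to its access category's list
 * and, if needed, add the access category to the hardware queue's
 * round-robin schedule. Paused or already-scheduled TIDs are left alone.
 */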
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

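/*
 * Flush every frame still buffered on a TID: software-retried frames are
 * removed from the block-ack window and completed with failed status,
 * everything else is sent out as a normal (non-aggregate) frame.
 */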
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		list_add_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

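/*
 * Mark a sequence number as completed in the TID's block-ack window and
 * slide the window start past any leading slots that are already done.
 */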
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

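/*
 * Track a newly aggregated sequence number in the TID's block-ack window
 * bitmap, extending the window tail if the new entry lies beyond it.
 */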
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

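/*
 * Clone a tx buffer so a software retry can reuse the original holding
 * descriptor: the MPDU, DMA address, descriptor contents and buffer state
 * are copied into a fresh ath_buf taken from the free list.
 */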
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

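/*
 * Handle tx completion of an aggregate: walk the subframe chain, complete
 * the subframes acked by the block-ack bitmap, software-retry the un-acked
 * ones (or fail them once the retry limit is reached), and requeue any
 * still-pending subframes at the head of the TID queue.
 */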
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

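/*
 * Compute the aggregate size limit for this frame's rate series, capped by
 * the 4ms frame-length table, the hardware A-MPDU maximum and the peer's
 * advertised maximum A-MPDU length. Returns 0 if a legacy or probe rate is
 * selected, which disables aggregation for this frame.
 */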
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm)
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

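/*
 * Assemble an A-MPDU from the TID's buffer queue: pull frames while they
 * fit inside the block-ack window, the aggregate length limit and the
 * subframe limit, computing per-subframe delimiter padding along the way.
 */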
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		seqno = bf->bf_state.seqno;

		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

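/*
 * Form and queue aggregates for a TID until the hardware queue is
 * sufficiently filled or the block-ack window closes. A single remaining
 * frame is sent as a regular non-aggregate frame.
 */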
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

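/*
 * Prepare a TID for a new tx aggregation (ADDBA) session: reset the
 * block-ack window bookkeeping, report the starting sequence number and
 * keep the TID paused until the session becomes operational.
 */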
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

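/*
 * Tear down a tx aggregation session: pause the TID and flush its buffered
 * frames. If subframes are still awaiting tx completion, the TID is marked
 * AGGR_CLEANUP and the remaining state is cleared once they complete.
 */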
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

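/*
 * Unschedule all of a node's TIDs (used when the station goes to sleep)
 * and report whether any of them still hold buffered frames.
 */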
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

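/*
 * Re-schedule a node's TIDs after the station wakes up: request that the
 * hardware tx filter be cleared and queue any TIDs that still have frames
 * buffered.
 */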
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

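/*
 * Mark an ADDBA session as operational: size the block-ack window from the
 * peer's A-MPDU factor and unpause the TID so buffered frames can flow.
 */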
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

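/*
 * Allocate and initialize a hardware tx queue of the requested type and
 * WMM subtype. Returns the corresponding ath_txq, or NULL if the hardware
 * has no queue to spare.
 */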
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

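/*
 * Push updated WMM parameters (AIFS, cwmin/cwmax, burst and ready time)
 * for a queue down to the hardware; beacon queue parameters are only
 * cached here and applied later by the beacon configuration code.
 */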
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

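/*
 * Drop every frame on the given descriptor list, temporarily releasing the
 * queue lock while each buffer (or aggregate) is completed with a failed
 * status. Stale holding descriptors are simply returned to the free list.
 */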
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

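/*
 * Abort tx DMA and drain every configured hardware queue. Returns true if
 * the DMA engine stopped cleanly, false if any queue still had pending
 * descriptors.
 */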
Felix Fietkau080e1a22010-12-05 20:17:53 +01001264bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301265{
Sujithcbe61d82009-02-09 13:27:12 +05301266 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001267 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301268 struct ath_txq *txq;
1269 int i, npend = 0;
1270
1271 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001272 return true;
Sujith043a0402009-01-16 21:38:47 +05301273
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001274 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301275
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001276 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301277 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001278 if (!ATH_TXQ_SETUP(sc, i))
1279 continue;
1280
1281 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301282 }
1283
Felix Fietkau080e1a22010-12-05 20:17:53 +01001284 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001285 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301286
1287 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001288 if (!ATH_TXQ_SETUP(sc, i))
1289 continue;
1290
1291 /*
1292 * The caller will resume queues with ieee80211_wake_queues.
1293 * Mark the queue as not stopped to prevent ath_tx_complete
1294 * from waking the queue too early.
1295 */
1296 txq = &sc->tx.txq[i];
1297 txq->stopped = false;
1298 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301299 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001300
1301 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301302}
1303
Sujithe8324352009-01-16 21:38:42 +05301304void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1305{
1306 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1307 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1308}
1309
Ben Greear7755bad2011-01-18 17:30:00 -08001310/* For each axq_acq entry and each of its TIDs, try to schedule packets
 1311 * for transmission until axq_ampdu_depth reaches the minimum queue depth.
1312 */
Sujithe8324352009-01-16 21:38:42 +05301313void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1314{
Ben Greear7755bad2011-01-18 17:30:00 -08001315 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1316 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301317
Felix Fietkau21f28e62011-01-15 14:30:14 +01001318 if (list_empty(&txq->axq_acq) ||
1319 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301320 return;
1321
1322 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001323 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301324
Ben Greear7755bad2011-01-18 17:30:00 -08001325 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1326 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1327 list_del(&ac->list);
1328 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301329
Ben Greear7755bad2011-01-18 17:30:00 -08001330 while (!list_empty(&ac->tid_q)) {
1331 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1332 list);
1333 list_del(&tid->list);
1334 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301335
Ben Greear7755bad2011-01-18 17:30:00 -08001336 if (tid->paused)
1337 continue;
Sujithe8324352009-01-16 21:38:42 +05301338
Ben Greear7755bad2011-01-18 17:30:00 -08001339 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301340
Ben Greear7755bad2011-01-18 17:30:00 -08001341 /*
1342 * add tid to round-robin queue if more frames
1343 * are pending for the tid
1344 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001345 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001346 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301347
Ben Greear7755bad2011-01-18 17:30:00 -08001348 if (tid == last_tid ||
1349 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1350 break;
Sujithe8324352009-01-16 21:38:42 +05301351 }
Ben Greear7755bad2011-01-18 17:30:00 -08001352
1353 if (!list_empty(&ac->tid_q)) {
1354 if (!ac->sched) {
1355 ac->sched = true;
1356 list_add_tail(&ac->list, &txq->axq_acq);
1357 }
1358 }
1359
1360 if (ac == last_ac ||
1361 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1362 return;
Sujithe8324352009-01-16 21:38:42 +05301363 }
1364}
1365
Sujithe8324352009-01-16 21:38:42 +05301366/***********/
1367/* TX, DMA */
1368/***********/
1369
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001370/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001371 * Insert a chain of ath_buf (descriptors) on a txq and
 1372 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001373 */
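/*
 * Note (describing the code below): on EDMA hardware the chain is pushed
 * into the current TX FIFO slot when that slot is free, otherwise it stays
 * on axq_q; on legacy hardware it is linked to the previous descriptor via
 * axq_link or written to TXDP directly when the queue was idle.
 */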
Sujith102e0572008-10-29 10:15:16 +05301374static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001375 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001376{
Sujithcbe61d82009-02-09 13:27:12 +05301377 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001378 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001379 struct ath_buf *bf, *bf_last;
1380 bool puttxbuf = false;
1381 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301382
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001383 /*
1384 * Insert the frame on the outbound list and
1385 * pass it on to the hardware.
1386 */
1387
1388 if (list_empty(head))
1389 return;
1390
Felix Fietkaufce041b2011-05-19 12:20:25 +02001391 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001392 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001393 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394
Joe Perches226afe62010-12-02 19:12:37 -08001395 ath_dbg(common, ATH_DBG_QUEUE,
1396 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001397
Felix Fietkaufce041b2011-05-19 12:20:25 +02001398 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1399 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001400 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001401 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001402 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001403 list_splice_tail_init(head, &txq->axq_q);
1404
Felix Fietkaufce041b2011-05-19 12:20:25 +02001405 if (txq->axq_link) {
1406 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001407 ath_dbg(common, ATH_DBG_XMIT,
1408 "link[%u] (%p)=%llx (%p)\n",
1409 txq->axq_qnum, txq->axq_link,
1410 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001411 } else if (!edma)
1412 puttxbuf = true;
1413
1414 txq->axq_link = bf_last->bf_desc;
1415 }
1416
1417 if (puttxbuf) {
1418 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1419 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1420 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1421 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1422 }
1423
1424 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001425 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001426 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001427 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001428
1429 if (!internal) {
1430 txq->axq_depth++;
1431 if (bf_is_ampdu_not_probing(bf))
1432 txq->axq_ampdu_depth++;
1433 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001434}
1435
Sujithe8324352009-01-16 21:38:42 +05301436static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001437 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301438{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001439 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001440 struct list_head bf_head;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001441 u16 seqno = bf->bf_state.seqno;
Sujithe8324352009-01-16 21:38:42 +05301442
Sujithe8324352009-01-16 21:38:42 +05301443 bf->bf_state.bf_type |= BUF_AMPDU;
1444
1445 /*
1446 * Do not queue to h/w when any of the following conditions is true:
1447 * - there are pending frames in software queue
1448 * - the TID is currently paused for ADDBA/BAR request
1449 * - seqno is not within block-ack window
1450 * - h/w queue depth exceeds low water mark
1451 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001452 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001453 !BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001454 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001455 /*
Sujithe8324352009-01-16 21:38:42 +05301456		 * Add this frame to the software queue so it can be scheduled
 1457		 * later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001458 */
Ben Greearbda8add2011-01-09 23:11:48 -08001459 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau56dc6332011-08-28 00:32:22 +02001460 __skb_queue_tail(&tid->buf_q, bf->bf_mpdu);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001461 if (!txctl->an || !txctl->an->sleeping)
1462 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301463 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001464 }
1465
Felix Fietkau04caf862010-11-14 15:20:12 +01001466 INIT_LIST_HEAD(&bf_head);
1467 list_add(&bf->list, &bf_head);
1468
Sujithe8324352009-01-16 21:38:42 +05301469 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001470 if (!fi->retries)
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001471 ath_tx_addto_baw(sc, tid, seqno);
Sujithe8324352009-01-16 21:38:42 +05301472
1473 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001474 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301475 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001476 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001477 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301478}
1479
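/*
 * Queue a frame to the hardware without A-MPDU aggregation; if a TID is
 * given, advance its starting sequence number for a subsequent ADDBA request.
 */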
Felix Fietkau82b873a2010-11-11 03:18:37 +01001480static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1481 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001482 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001483{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001484 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301485 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486
Sujithe8324352009-01-16 21:38:42 +05301487 bf = list_first_entry(bf_head, struct ath_buf, list);
1488 bf->bf_state.bf_type &= ~BUF_AMPDU;
1489
1490 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001491 if (tid)
1492 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301493
Sujithd43f30152009-01-16 21:38:53 +05301494 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001495 fi = get_frame_info(bf->bf_mpdu);
1496 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001497 ath_tx_txqaddbuf(sc, txq, bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301498 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001499}
1500
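/* Map the 802.11 frame type to the ath9k_pkt_type used in the TX descriptor. */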
Sujith528f0c62008-10-29 10:14:26 +05301501static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502{
Sujith528f0c62008-10-29 10:14:26 +05301503 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 enum ath9k_pkt_type htype;
1505 __le16 fc;
1506
Sujith528f0c62008-10-29 10:14:26 +05301507 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508 fc = hdr->frame_control;
1509
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001510 if (ieee80211_is_beacon(fc))
1511 htype = ATH9K_PKT_TYPE_BEACON;
1512 else if (ieee80211_is_probe_resp(fc))
1513 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1514 else if (ieee80211_is_atim(fc))
1515 htype = ATH9K_PKT_TYPE_ATIM;
1516 else if (ieee80211_is_pspoll(fc))
1517 htype = ATH9K_PKT_TYPE_PSPOLL;
1518 else
1519 htype = ATH9K_PKT_TYPE_NORMAL;
1520
1521 return htype;
1522}
1523
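/*
 * Cache per-frame data (key index, key type, frame length) in the
 * driver-private area of the skb; the mac80211 control pointers are
 * overwritten by this and must not be used afterwards.
 */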
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001524static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1525 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301526{
1527 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001528 struct ieee80211_sta *sta = tx_info->control.sta;
1529 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001530 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001531 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001532 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001533 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301534
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001535 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301536
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001537 if (sta)
1538 an = (struct ath_node *) sta->drv_priv;
1539
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001540 memset(fi, 0, sizeof(*fi));
1541 if (hw_key)
1542 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001543 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1544 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001545 else
1546 fi->keyix = ATH9K_TXKEYIX_INVALID;
1547 fi->keytype = keytype;
1548 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301549}
1550
Felix Fietkau82b873a2010-11-11 03:18:37 +01001551static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301552{
1553 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1554 int flags = 0;
1555
Sujith528f0c62008-10-29 10:14:26 +05301556 flags |= ATH9K_TXDESC_INTREQ;
1557
1558 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1559 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301560
Felix Fietkau82b873a2010-11-11 03:18:37 +01001561 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001562 flags |= ATH9K_TXDESC_LDPC;
1563
Sujith528f0c62008-10-29 10:14:26 +05301564 return flags;
1565}
1566
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001567/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568 * rix - rate index
1569 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1570 * width - 0 for 20 MHz, 1 for 40 MHz
 1571 * half_gi - use 3.6 us instead of 4 us symbol time when set
1572 */
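/*
 * Worked example (illustrative numbers, not taken from the driver): for
 * MCS 7, 20 MHz, long GI, single stream and pktlen = 1500 bytes:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS         = 12022
 *   nsymbits = bits_per_symbol[7][0] * 1 stream  = 260
 *   nsymbols = (12022 + 260 - 1) / 260           = 47
 *   duration = SYMBOL_TIME(47) + 36 (L/HT training and signal fields)
 *            = 188 + 36 = 224 usec
 */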
Felix Fietkau269c44b2010-11-14 15:20:06 +01001573static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301574 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001575{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001576 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001577 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301578
1579 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001580 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001582 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1584
1585 if (!half_gi)
1586 duration = SYMBOL_TIME(nsymbols);
1587 else
1588 duration = SYMBOL_TIME_HALFGI(nsymbols);
1589
Sujithe63835b2008-11-18 09:07:53 +05301590	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001591 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301592
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001593 return duration;
1594}
1595
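/*
 * Reduce the TX chainmask from three chains (0x7) to two (0x3) for rate
 * codes below 0x90 on 5 GHz channels when the hardware has the APM
 * capability; otherwise the chainmask is returned unchanged.
 */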
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301596u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1597{
1598 struct ath_hw *ah = sc->sc_ah;
1599 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301600 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1601 (curchan->channelFlags & CHANNEL_5GHZ) &&
1602 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301603 return 0x3;
1604 else
1605 return chainmask;
1606}
1607
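/*
 * Build the four-entry hardware rate series from the mac80211 rate table:
 * RTS/CTS protection, 40 MHz and short-GI flags, per-rate chainmask and
 * packet duration, for both MCS and legacy rates.
 */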
Felix Fietkau269c44b2010-11-14 15:20:06 +01001608static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001609{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001610 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001611 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301612 struct sk_buff *skb;
1613 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301614 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001615 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301616 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301617 int i, flags = 0;
1618 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301619 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301620
1621 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301622
Sujitha22be222009-03-30 15:28:36 +05301623 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301624 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301625 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301626 hdr = (struct ieee80211_hdr *)skb->data;
1627 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301628
Sujithc89424d2009-01-30 14:29:28 +05301629 /*
 1630	 * Check whether short preamble is needed for the CTS rate by
 1631	 * looking at the BSS's global flag; for the rate series itself,
 1632	 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used instead.
1633 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001634 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1635 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301636 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001637 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001638
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001639 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001640 bool is_40, is_sgi, is_sp;
1641 int phy;
1642
Sujithe63835b2008-11-18 09:07:53 +05301643 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001644 continue;
1645
Sujitha8efee42008-11-18 09:07:30 +05301646 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301647 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001648
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301649 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301650 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001651 flags |= ATH9K_TXDESC_RTSENA;
1652 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1653 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1654 flags |= ATH9K_TXDESC_CTSENA;
1655 }
1656
Sujithc89424d2009-01-30 14:29:28 +05301657 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1658 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1659 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1660 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661
Felix Fietkau545750d2009-11-23 22:21:01 +01001662 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1663 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1664 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1665
1666 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1667 /* MCS rates */
1668 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301669 series[i].ChSel = ath_txchainmask_reduction(sc,
1670 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001671 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001672 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001673 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1674 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001675 continue;
1676 }
1677
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301678 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001679 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1680 !(rate->flags & IEEE80211_RATE_ERP_G))
1681 phy = WLAN_RC_PHY_CCK;
1682 else
1683 phy = WLAN_RC_PHY_OFDM;
1684
1685 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1686 series[i].Rate = rate->hw_value;
1687 if (rate->hw_value_short) {
1688 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1689 series[i].Rate |= rate->hw_value_short;
1690 } else {
1691 is_sp = false;
1692 }
1693
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301694 if (bf->bf_state.bfs_paprd)
1695 series[i].ChSel = common->tx_chainmask;
1696 else
1697 series[i].ChSel = ath_txchainmask_reduction(sc,
1698 common->tx_chainmask, series[i].Rate);
1699
Felix Fietkau545750d2009-11-23 22:21:01 +01001700 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001701 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001702 }
1703
Felix Fietkau27032052010-01-17 21:08:50 +01001704 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001705 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001706 flags &= ~ATH9K_TXDESC_RTSENA;
1707
1708 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1709 if (flags & ATH9K_TXDESC_RTSENA)
1710 flags &= ~ATH9K_TXDESC_CTSENA;
1711
Sujithe63835b2008-11-18 09:07:53 +05301712 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301713 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1714 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301715 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301716 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301717
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001718}
1719
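/*
 * Allocate an ath_buf for the skb, assign a sequence number when the frame
 * belongs to an aggregation TID, map the payload for DMA and fill in the
 * first TX descriptor.
 */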
Felix Fietkaufa05f872011-08-28 00:32:24 +02001720static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001721 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001722 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001723 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301724{
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001726 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001727 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001728 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001729 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001730 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001731 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001732 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001733
1734 bf = ath_tx_get_buffer(sc);
1735 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001736 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001737 return NULL;
1738 }
Sujithe8324352009-01-16 21:38:42 +05301739
Sujithe8324352009-01-16 21:38:42 +05301740 ATH_TXBUF_RESET(bf);
1741
Felix Fietkaufa05f872011-08-28 00:32:24 +02001742 if (tid) {
1743 seqno = tid->seq_next;
1744 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1745 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1746 bf->bf_state.seqno = seqno;
1747 }
1748
Felix Fietkau82b873a2010-11-11 03:18:37 +01001749 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301750 bf->bf_mpdu = skb;
1751
Ben Greearc1739eb2010-10-14 12:45:29 -07001752 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1753 skb->len, DMA_TO_DEVICE);
1754 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301755 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001756 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001757 ath_err(ath9k_hw_common(sc->sc_ah),
1758 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001759 ath_tx_return_buffer(sc, bf);
1760 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301761 }
1762
Sujithe8324352009-01-16 21:38:42 +05301763 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301764
1765 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001766 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301767
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001768 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1769 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301770
1771 ath9k_hw_filltxdesc(ah, ds,
1772 skb->len, /* segment length */
1773 true, /* first segment */
1774 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001775 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001776 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001777 txq->axq_qnum);
1778
Felix Fietkau56dc6332011-08-28 00:32:22 +02001779 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001780
1781 return bf;
1782}
1783
1784/* FIXME: tx power */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001785static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001786 struct ath_tx_control *txctl)
1787{
Felix Fietkau04caf862010-11-14 15:20:12 +01001788 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1789 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001790 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001791 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001792 struct ath_buf *bf;
1793 int ret = 0;
Felix Fietkau04caf862010-11-14 15:20:12 +01001794 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301795
Sujithe8324352009-01-16 21:38:42 +05301796 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301797 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1798 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001799 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1800 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001801 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001802
Felix Fietkau066dae92010-11-07 14:59:39 +01001803 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001804 }
1805
Felix Fietkaufa05f872011-08-28 00:32:24 +02001806 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1807 if (unlikely(!bf)) {
1808 ret = -ENOMEM;
1809 goto out;
1810 }
1811
Felix Fietkau248a38d2010-12-10 21:16:46 +01001812 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001813 /*
1814 * Try aggregation if it's a unicast data frame
1815 * and the destination is HT capable.
1816 */
1817 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301818 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001819 INIT_LIST_HEAD(&bf_head);
1820 list_add_tail(&bf->list, &bf_head);
1821
Felix Fietkau82b873a2010-11-11 03:18:37 +01001822 bf->bf_state.bfs_paprd = txctl->paprd;
1823
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001824 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001825 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1826 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001827
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301828 if (txctl->paprd)
1829 bf->bf_state.bfs_paprd_timestamp = jiffies;
1830
Felix Fietkau55195412011-04-17 23:28:09 +02001831 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1832 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1833
Felix Fietkau248a38d2010-12-10 21:16:46 +01001834 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301835 }
1836
Felix Fietkaufa05f872011-08-28 00:32:24 +02001837out:
Sujithe8324352009-01-16 21:38:42 +05301838 spin_unlock_bh(&txctl->txq->axq_lock);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001839 return ret;
Sujithe8324352009-01-16 21:38:42 +05301840}
1841
1842/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001843int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301844 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001845{
Felix Fietkau28d16702010-11-14 15:20:10 +01001846 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1847 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001848 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001849 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001850 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001851 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001852 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001853 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001854 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001855
Ben Greeara9927ba2010-12-06 21:13:49 -08001856 /* NOTE: sta can be NULL according to net/mac80211.h */
1857 if (sta)
1858 txctl->an = (struct ath_node *)sta->drv_priv;
1859
Felix Fietkau04caf862010-11-14 15:20:12 +01001860 if (info->control.hw_key)
1861 frmlen += info->control.hw_key->icv_len;
1862
Felix Fietkau28d16702010-11-14 15:20:10 +01001863 /*
1864 * As a temporary workaround, assign seq# here; this will likely need
1865 * to be cleaned up to work better with Beacon transmission and virtual
1866 * BSSes.
1867 */
1868 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1869 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1870 sc->tx.seq_no += 0x10;
1871 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1872 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1873 }
1874
1875 /* Add the padding after the header if this is not already done */
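	/*
	 * For example (assuming a 3-address QoS data frame): the header is
	 * 26 bytes, so padpos = 26 and padsize = 26 & 3 = 2, leaving two pad
	 * bytes between the header and the frame body.
	 */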
1876 padpos = ath9k_cmn_padpos(hdr->frame_control);
1877 padsize = padpos & 3;
1878 if (padsize && skb->len > padpos) {
1879 if (skb_headroom(skb) < padsize)
1880 return -ENOMEM;
1881
1882 skb_push(skb, padsize);
1883 memmove(skb->data, skb->data + padsize, padpos);
1884 }
1885
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001886 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1887 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1888 !ieee80211_is_data(hdr->frame_control))
1889 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1890
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001891 setup_frame_info(hw, skb, frmlen);
1892
1893 /*
1894 * At this point, the vif, hw_key and sta pointers in the tx control
 1895 * info are no longer valid (overwritten by the ath_frame_info data).
1896 */
1897
Felix Fietkau066dae92010-11-07 14:59:39 +01001898 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001899 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001900 if (txq == sc->tx.txq_map[q] &&
1901 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001902 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001903 txq->stopped = 1;
1904 }
1905 spin_unlock_bh(&txq->axq_lock);
1906
Felix Fietkaufa05f872011-08-28 00:32:24 +02001907 return ath_tx_start_dma(sc, skb, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001908}
1909
Sujithe8324352009-01-16 21:38:42 +05301910/*****************/
1911/* TX Completion */
1912/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001913
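/*
 * Hand a completed frame back to mac80211: translate the driver TX flags
 * into IEEE80211_TX_STAT_* bits, strip the MAC header padding added on
 * transmit, and wake the stopped queue once it has drained far enough.
 */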
Sujithe8324352009-01-16 21:38:42 +05301914static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301915 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001916{
Sujithe8324352009-01-16 21:38:42 +05301917 struct ieee80211_hw *hw = sc->hw;
1918 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001919 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001920 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001921 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301922
Joe Perches226afe62010-12-02 19:12:37 -08001923 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301924
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301925 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301926 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301927
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301928 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301929 /* Frame was ACKed */
1930 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1931 }
1932
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001933 padpos = ath9k_cmn_padpos(hdr->frame_control);
1934 padsize = padpos & 3;
1935 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301936 /*
1937 * Remove MAC header padding before giving the frame back to
1938 * mac80211.
1939 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001940 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301941 skb_pull(skb, padsize);
1942 }
1943
Sujith1b04b932010-01-08 10:36:05 +05301944 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1945 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001946 ath_dbg(common, ATH_DBG_PS,
1947 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301948 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1949 PS_WAIT_FOR_CAB |
1950 PS_WAIT_FOR_PSPOLL_DATA |
1951 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001952 }
1953
Felix Fietkau7545daf2011-01-24 19:23:16 +01001954 q = skb_get_queue_mapping(skb);
1955 if (txq == sc->tx.txq_map[q]) {
1956 spin_lock_bh(&txq->axq_lock);
1957 if (WARN_ON(--txq->pending_frames < 0))
1958 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001959
Felix Fietkau7545daf2011-01-24 19:23:16 +01001960 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1961 ieee80211_wake_queue(sc->hw, q);
1962 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001963 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001964 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001965 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966
1967 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301968}
1969
1970static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001971 struct ath_txq *txq, struct list_head *bf_q,
1972 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301973{
1974 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301975 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301976 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301977
Sujithe8324352009-01-16 21:38:42 +05301978 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301979 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301980
1981 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301982 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301983
1984 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301985 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301986 }
1987
Ben Greearc1739eb2010-10-14 12:45:29 -07001988 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001989 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001990
1991 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301992 if (time_after(jiffies,
1993 bf->bf_state.bfs_paprd_timestamp +
1994 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001995 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001996 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001997 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001998 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001999 ath_debug_stat_tx(sc, bf, ts, txq);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302000 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002001 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002002	/* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
2003 * accidentally reference it later.
2004 */
2005 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302006
2007 /*
 2008	 * Return the ath_buf list of this mpdu to the free queue
2009 */
2010 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2011 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2012 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2013}
2014
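/*
 * Fill in the mac80211 TX status consumed by rate control: ACK RSSI,
 * A-MPDU subframe counts, filtered-frame state and per-rate try counts.
 */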
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002015static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2016 struct ath_tx_status *ts, int nframes, int nbad,
2017 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302018{
Sujitha22be222009-03-30 15:28:36 +05302019 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302020 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302021 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002022 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002023 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302024 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302025
Sujith95e4acb2009-03-13 08:56:09 +05302026 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002027 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302028
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002029 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 WARN_ON(tx_rateindex >= hw->max_rates);
2031
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002032 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302033 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002034 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002035 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302036
Felix Fietkaub572d032010-11-14 15:20:07 +01002037 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002038
Felix Fietkaub572d032010-11-14 15:20:07 +01002039 tx_info->status.ampdu_len = nframes;
2040 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002041 }
2042
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302044 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002045 /*
 2046		 * If an underrun error is seen, treat it as an excessive
 2047		 * retry only if the max frame trigger level has been reached
 2048		 * (2 KB for single stream, 4 KB for dual stream).
 2049		 * Adjust the long retry count as if the frame had been tried
 2050		 * hw->max_rate_tries times, to affect how rate control updates
 2051		 * PER for the failed rate.
 2052		 * When the bus is congested, penalizing this type of
 2053		 * underrun helps the hardware actually transmit new frames
 2054		 * successfully by eventually preferring slower rates.
 2055		 * This should also alleviate the congestion on the bus.
2056 */
2057 if (ieee80211_is_data(hdr->frame_control) &&
2058 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2059 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002060 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002061 tx_info->status.rates[tx_rateindex].count =
2062 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302063 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302064
Felix Fietkau545750d2009-11-23 22:21:01 +01002065 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302066 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002067 tx_info->status.rates[i].idx = -1;
2068 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302069
Felix Fietkau78c46532010-06-25 01:26:16 +02002070 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302071}
2072
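/*
 * Common completion path for legacy and EDMA TX: update the queue depth
 * counters, complete the frame (as an A-MPDU or as a single frame) with
 * axq_lock dropped, then reschedule the queue.
 */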
Felix Fietkaufce041b2011-05-19 12:20:25 +02002073static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2074 struct ath_tx_status *ts, struct ath_buf *bf,
2075 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302076 __releases(txq->axq_lock)
2077 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002078{
2079 int txok;
2080
2081 txq->axq_depth--;
2082 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2083 txq->axq_tx_inprogress = false;
2084 if (bf_is_ampdu_not_probing(bf))
2085 txq->axq_ampdu_depth--;
2086
2087 spin_unlock_bh(&txq->axq_lock);
2088
2089 if (!bf_isampdu(bf)) {
2090 /*
2091 * This frame is sent out as a single frame.
2092 * Use hardware retry status for this frame.
2093 */
2094 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2095 bf->bf_state.bf_type |= BUF_XRETRY;
2096 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2097 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2098 } else
2099 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2100
2101 spin_lock_bh(&txq->axq_lock);
2102
2103 if (sc->sc_flags & SC_OP_TXAGGR)
2104 ath_txq_schedule(sc, txq);
2105}
2106
Sujithc4288392008-11-18 09:09:30 +05302107static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002108{
Sujithcbe61d82009-02-09 13:27:12 +05302109 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002110 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002111 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2112 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302113 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002114 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002115 int status;
2116
Joe Perches226afe62010-12-02 19:12:37 -08002117 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2118 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2119 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120
Felix Fietkaufce041b2011-05-19 12:20:25 +02002121 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 if (list_empty(&txq->axq_q)) {
2124 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002125 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002126 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 break;
2128 }
2129 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2130
2131 /*
 2132		 * There is a race condition where a BH gets scheduled
 2133		 * after software writes TxE and before the hardware reloads the
 2134		 * last descriptor to pick up the newly chained one.
 2135		 * Software must keep the last DONE descriptor as a
 2136		 * holding descriptor - software does so by marking
 2137		 * it with the STALE flag.
2138 */
2139 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302140 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002142 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002144
2145 bf = list_entry(bf_held->list.next, struct ath_buf,
2146 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 }
2148
2149 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302150 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151
Felix Fietkau29bffa92010-03-29 20:14:23 -07002152 memset(&ts, 0, sizeof(ts));
2153 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002154 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002156
Ben Greear2dac4fb2011-01-09 23:11:45 -08002157 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158
2159 /*
 2160		 * Remove the ath_bufs of this transmit unit from the txq, but
 2161		 * leave the last descriptor behind as the holding
 2162		 * descriptor for the hardware.
2163 */
Sujitha119cc42009-03-30 15:28:38 +05302164 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002165 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 if (!list_is_singular(&lastbf->list))
2167 list_cut_position(&bf_head,
2168 &txq->axq_q, lastbf->list.prev);
2169
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002171 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002172 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 }
Johannes Berge6a98542008-10-21 12:40:02 +02002174
Felix Fietkaufce041b2011-05-19 12:20:25 +02002175 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002177 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178}
2179
Sujith305fe472009-07-23 15:32:29 +05302180static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002181{
2182 struct ath_softc *sc = container_of(work, struct ath_softc,
2183 tx_complete_work.work);
2184 struct ath_txq *txq;
2185 int i;
2186 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002187#ifdef CONFIG_ATH9K_DEBUGFS
2188 sc->tx_complete_poll_work_seen++;
2189#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002190
2191 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2192 if (ATH_TXQ_SETUP(sc, i)) {
2193 txq = &sc->tx.txq[i];
2194 spin_lock_bh(&txq->axq_lock);
2195 if (txq->axq_depth) {
2196 if (txq->axq_tx_inprogress) {
2197 needreset = true;
2198 spin_unlock_bh(&txq->axq_lock);
2199 break;
2200 } else {
2201 txq->axq_tx_inprogress = true;
2202 }
2203 }
2204 spin_unlock_bh(&txq->axq_lock);
2205 }
2206
2207 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002208 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2209 "tx hung, resetting the chip\n");
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302210 spin_lock_bh(&sc->sc_pcu_lock);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002211 ath_reset(sc, true);
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302212 spin_unlock_bh(&sc->sc_pcu_lock);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002213 }
2214
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002215 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002216 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2217}
2218
2219
Sujithe8324352009-01-16 21:38:42 +05302220
2221void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002222{
Sujithe8324352009-01-16 21:38:42 +05302223 int i;
2224 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002225
Sujithe8324352009-01-16 21:38:42 +05302226 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002227
2228 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302229 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2230 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002231 }
2232}
2233
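/*
 * TX completion handler for EDMA-capable (ATH9K_HW_CAP_EDMA) hardware:
 * completion status is read from the TX status ring rather than from the
 * per-queue descriptor chain, and ts.qid identifies the completed queue.
 */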
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002234void ath_tx_edma_tasklet(struct ath_softc *sc)
2235{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002236 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2238 struct ath_hw *ah = sc->sc_ah;
2239 struct ath_txq *txq;
2240 struct ath_buf *bf, *lastbf;
2241 struct list_head bf_head;
2242 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002243
2244 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002245 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002246 if (status == -EINPROGRESS)
2247 break;
2248 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002249 ath_dbg(common, ATH_DBG_XMIT,
2250 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 break;
2252 }
2253
2254 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002255 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 continue;
2257
Felix Fietkaufce041b2011-05-19 12:20:25 +02002258 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002259
2260 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002261
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2263 spin_unlock_bh(&txq->axq_lock);
2264 return;
2265 }
2266
2267 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2268 struct ath_buf, list);
2269 lastbf = bf->bf_lastbf;
2270
2271 INIT_LIST_HEAD(&bf_head);
2272 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2273 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002274
Felix Fietkaufce041b2011-05-19 12:20:25 +02002275 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2276 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002277
Felix Fietkaufce041b2011-05-19 12:20:25 +02002278 if (!list_empty(&txq->axq_q)) {
2279 struct list_head bf_q;
2280
2281 INIT_LIST_HEAD(&bf_q);
2282 txq->axq_link = NULL;
2283 list_splice_tail_init(&txq->axq_q, &bf_q);
2284 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2285 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002286 }
2287
Felix Fietkaufce041b2011-05-19 12:20:25 +02002288 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002289 spin_unlock_bh(&txq->axq_lock);
2290 }
2291}
2292
Sujithe8324352009-01-16 21:38:42 +05302293/*****************/
2294/* Init, Cleanup */
2295/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002296
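/*
 * Allocate the DMA-coherent ring that EDMA hardware uses to report TX
 * status: 'size' entries of txs_len bytes each.
 */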
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002297static int ath_txstatus_setup(struct ath_softc *sc, int size)
2298{
2299 struct ath_descdma *dd = &sc->txsdma;
2300 u8 txs_len = sc->sc_ah->caps.txs_len;
2301
2302 dd->dd_desc_len = size * txs_len;
2303 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2304 &dd->dd_desc_paddr, GFP_KERNEL);
2305 if (!dd->dd_desc)
2306 return -ENOMEM;
2307
2308 return 0;
2309}
2310
2311static int ath_tx_edma_init(struct ath_softc *sc)
2312{
2313 int err;
2314
2315 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2316 if (!err)
2317 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2318 sc->txsdma.dd_desc_paddr,
2319 ATH_TXSTATUS_RING_SIZE);
2320
2321 return err;
2322}
2323
2324static void ath_tx_edma_cleanup(struct ath_softc *sc)
2325{
2326 struct ath_descdma *dd = &sc->txsdma;
2327
2328 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2329 dd->dd_desc_paddr);
2330}
2331
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002332int ath_tx_init(struct ath_softc *sc, int nbufs)
2333{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002334 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335 int error = 0;
2336
Sujith797fe5c2009-03-30 15:28:45 +05302337 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002338
Sujith797fe5c2009-03-30 15:28:45 +05302339 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002340 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302341 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002342 ath_err(common,
2343 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302344 goto err;
2345 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002346
Sujith797fe5c2009-03-30 15:28:45 +05302347 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002348 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302349 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002350 ath_err(common,
2351 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302352 goto err;
2353 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002354
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002355 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2356
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002357 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2358 error = ath_tx_edma_init(sc);
2359 if (error)
2360 goto err;
2361 }
2362
Sujith797fe5c2009-03-30 15:28:45 +05302363err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364 if (error != 0)
2365 ath_tx_cleanup(sc);
2366
2367 return error;
2368}
2369
Sujith797fe5c2009-03-30 15:28:45 +05302370void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371{
Sujithb77f4832008-12-07 21:44:03 +05302372 if (sc->beacon.bdma.dd_desc_len != 0)
2373 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374
Sujithb77f4832008-12-07 21:44:03 +05302375 if (sc->tx.txdma.dd_desc_len != 0)
2376 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002377
2378 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2379 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380}
2381
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2383{
Sujithc5170162008-10-29 10:13:59 +05302384 struct ath_atx_tid *tid;
2385 struct ath_atx_ac *ac;
2386 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387
Sujith8ee5afb2008-12-07 21:43:36 +05302388 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302389 tidno < WME_NUM_TID;
2390 tidno++, tid++) {
2391 tid->an = an;
2392 tid->tidno = tidno;
2393 tid->seq_start = tid->seq_next = 0;
2394 tid->baw_size = WME_MAX_BA;
2395 tid->baw_head = tid->baw_tail = 0;
2396 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302397 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302398 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002399 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302400 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302401 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302402 tid->state &= ~AGGR_ADDBA_COMPLETE;
2403 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302404 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405
Sujith8ee5afb2008-12-07 21:43:36 +05302406 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302407 acno < WME_NUM_AC; acno++, ac++) {
2408 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002409 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302410 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411 }
2412}
2413
Sujithb5aa9bf2008-10-29 10:13:31 +05302414void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415{
Felix Fietkau2b409942010-07-07 19:42:08 +02002416 struct ath_atx_ac *ac;
2417 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002419 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302420
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 for (tidno = 0, tid = &an->tid[tidno];
2422 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423
Felix Fietkau2b409942010-07-07 19:42:08 +02002424 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002425 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426
Felix Fietkau2b409942010-07-07 19:42:08 +02002427 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 if (tid->sched) {
2430 list_del(&tid->list);
2431 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002433
2434 if (ac->sched) {
2435 list_del(&ac->list);
2436 tid->ac->sched = false;
2437 }
2438
2439 ath_tid_drain(sc, txq, tid);
2440 tid->state &= ~AGGR_ADDBA_COMPLETE;
2441 tid->state &= ~AGGR_CLEANUP;
2442
2443 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002444 }
2445}