/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
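
/*
 * Worked example for the timing macros above (the numbers follow directly
 * from the definitions): an OFDM/HT symbol is 4 us with the long GI, so
 * SYMBOL_TIME(10) = 40 us; with the short (half) GI a symbol is 3.6 us, so
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us. Going the other way,
 * NUM_SYMBOLS_PER_USEC(8) = 2 full-GI symbols fit in 8 us.
 */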

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
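
/*
 * Each bits_per_symbol entry is the number of data bits carried by one
 * 4 us symbol for that MCS (modulo 8, i.e. per spatial stream) at 20 or
 * 40 MHz. For example, MCS 7 at 20 MHz carries 260 bits per symbol,
 * roughly 260 bits / 4 us = 65 Mbit/s with the long GI.
 */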

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
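
/*
 * ath_max_4ms_framelen[mode][mcs] is (approximately) the largest frame, in
 * bytes, that still fits in a 4 ms transmit duration at that rate;
 * ath_lookup_rate() below uses it to bound the aggregate size. As a rough
 * sanity check: MCS 0 HT20 runs at 6.5 Mbit/s, and 6.5 Mbit/s * 4 ms / 8
 * is about 3250 bytes, close to the table's 3212 (the difference accounts
 * for preamble/PLCP overhead). Entries are clamped to 65532, just under
 * the 16-bit hardware length limit.
 */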
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		list_add_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
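
/*
 * Block-ack window (BAW) bookkeeping example: with tid->seq_start = 100 and
 * a subframe sequence number of 103, ATH_BA_INDEX() yields index 3, and
 * cindex is that offset from baw_head wrapped onto the ATH_TID_MAX_BUFS
 * bitmap. ath_tx_addto_baw() sets the bit when a subframe is queued;
 * ath_tx_update_baw() clears it on completion and slides seq_start/baw_head
 * forward past any leading completed slots.
 */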

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

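/*
 * Handle tx completion of an aggregate: walk the chain of subframes, use
 * the block-ack bitmap reported in the tx status to classify each one as
 * acked, failed, or pending a software retry, update the BAW and rate
 * control accordingly, and requeue still-pending subframes at the head of
 * the tid queue so ordering is preserved.
 */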
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

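/*
 * Return true if any valid entry in the frame's rate series is a legacy
 * (non-MCS) rate; aggregation is not used for such frames.
 */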
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if a probe rate is selected, avoid aggregation
	 * of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
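
/*
 * Example of the limit computed above: if the slowest rate in the series
 * allows 32172 bytes in 4 ms and BT coex priority is detected, the
 * aggregate is capped at 3/8 of that, i.e. about 12064 bytes (and never
 * more than ATH_AMPDU_LIMIT_MAX or the peer's advertised max A-MPDU size).
 */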

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *       The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm)
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
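
/*
 * Delimiter padding example (numbers only illustrate the formula above):
 * with an MPDU density of 8 us and full GI, nsymbols = 8 >> 2 = 2; at
 * MCS 7, 20 MHz, single stream, nsymbits = 260, so minlen = 2 * 260 / 8
 * = 65 bytes. A shorter subframe gets (minlen - frmlen) / ATH_AGGR_DELIM_SZ
 * extra delimiters so the receiver still sees the required spacing.
 */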

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

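/*
 * Keep forming aggregates from the tid's buffer queue and handing them to
 * the hardware queue until the queue is deep enough or the block-ack window
 * closes; a single leftover frame is sent as a plain (non-aggregate) MPDU.
 */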
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

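/*
 * Called when mac80211 starts an ADDBA (aggregation) session for this tid:
 * report back the starting sequence number, pause the tid and reset its
 * block-ack window state until the handshake completes.
 */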
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

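/*
 * Allocate and initialize a hardware tx queue for the given queue type and
 * WME access category. Interrupt policy is chosen here: EDMA chips use
 * TXOK/TXERR interrupts, other chips rely mostly on EOL with DESC
 * interrupts as the queue gets deep (see the comment below), and the UAPSD
 * queue always takes per-descriptor interrupts.
 */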
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

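/*
 * Configure the CAB (content-after-beacon) queue: its ready time is set to
 * cabqReadytime percent of the beacon interval, clamped to the LO/HI
 * bounds. For example, a setting of 10 with a beacon interval of 100
 * yields qi.tqi_readyTime = (100 * 10) / 100 = 10 (in the same units as
 * the beacon interval).
 */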
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

Sujithe8324352009-01-16 21:38:42 +05301300void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1301{
1302 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1303 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1304}
1305
Ben Greear7755bad2011-01-18 17:30:00 -08001306/* For each axq_acq entry, for each tid, try to schedule packets
1307 * for transmit until ampdu_depth has reached min Q depth.
1308 */
Sujithe8324352009-01-16 21:38:42 +05301309void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1310{
Ben Greear7755bad2011-01-18 17:30:00 -08001311 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1312 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301313
Felix Fietkau21f28e62011-01-15 14:30:14 +01001314 if (list_empty(&txq->axq_acq) ||
1315 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301316 return;
1317
1318 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001319 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301320
Ben Greear7755bad2011-01-18 17:30:00 -08001321 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1322 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1323 list_del(&ac->list);
1324 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301325
Ben Greear7755bad2011-01-18 17:30:00 -08001326 while (!list_empty(&ac->tid_q)) {
1327 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1328 list);
1329 list_del(&tid->list);
1330 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301331
Ben Greear7755bad2011-01-18 17:30:00 -08001332 if (tid->paused)
1333 continue;
Sujithe8324352009-01-16 21:38:42 +05301334
Ben Greear7755bad2011-01-18 17:30:00 -08001335 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301336
Ben Greear7755bad2011-01-18 17:30:00 -08001337 /*
1338 * add tid to round-robin queue if more frames
1339 * are pending for the tid
1340 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001341 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001342 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301343
Ben Greear7755bad2011-01-18 17:30:00 -08001344 if (tid == last_tid ||
1345 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1346 break;
Sujithe8324352009-01-16 21:38:42 +05301347 }
Ben Greear7755bad2011-01-18 17:30:00 -08001348
1349 if (!list_empty(&ac->tid_q)) {
1350 if (!ac->sched) {
1351 ac->sched = true;
1352 list_add_tail(&ac->list, &txq->axq_acq);
1353 }
1354 }
1355
1356 if (ac == last_ac ||
1357 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1358 return;
Sujithe8324352009-01-16 21:38:42 +05301359 }
1360}
1361
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}

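/*
 * Queue an A-MPDU subframe: either push it to the hardware right away, or,
 * if it cannot go out yet (see the conditions below), park it on the TID's
 * software queue for later aggregation.
 */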
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, bf->bf_mpdu);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

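/*
 * Send a non-aggregated frame: hand the buffer chain straight to the
 * hardware queue and, if a TID is given, advance its starting sequence
 * number for any subsequent ADDBA request.
 */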
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

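/*
 * Fill in the per-frame ath_frame_info returned by get_frame_info(skb):
 * key index/type, frame length and sequence number. For aggregation-capable
 * QoS data frames, the sequence number kept in the TID state overrides the
 * one assigned by the upper layer.
 */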
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (an && ieee80211_is_data_qos(hdr->frame_control) &&
	    conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

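/*
 * On APM-capable hardware, reduce the TX chainmask from 0x7 to 0x3 on
 * 5 GHz channels for rate codes below 0x90 (i.e. anything that does not
 * require three spatial streams), presumably to stay within power limits;
 * otherwise the configured chainmask is returned unchanged.
 */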
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}

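/*
 * Build the 4-entry rate series for the descriptor from the mac80211 rate
 * table: RTS/CTS protection flags, channel width, guard interval, chain
 * selection and the on-air duration for each try.
 */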
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);
}

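/*
 * Allocate an ath_buf for the skb, DMA-map the frame and initialize the
 * TX descriptor. Returns NULL if no buffer is available or the DMA mapping
 * fails.
 */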
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	fi->bf = bf;

	return bf;
}

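/*
 * Hand a prepared buffer to the TX path: aggregation-eligible QoS data
 * goes through the per-TID A-MPDU path, everything else is sent as a
 * normal (non-aggregated) frame. Runs under the target queue's lock.
 */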
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

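/*
 * Main transmit entry point for frames handed down by mac80211. A rough
 * sketch of the calling side (illustrative only; the real hook lives in the
 * driver's mac80211 ops):
 *
 *	struct ath_tx_control txctl = { 0 };
 *
 *	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
 *	if (ath_tx_start(hw, skb, &txctl) != 0)
 *		dev_kfree_skb_any(skb);		caller frees on failure
 *
 * Callers typically only need to set the txq field; the beacon and PAPRD
 * paths fill in additional fields.
 */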
/* Upon failure, the caller should free the skb. */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

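/*
 * Hand a completed frame back to mac80211: strip the driver-added header
 * padding, update the pending-frame bookkeeping (waking the mac80211 queue
 * if it had been stopped), and report the TX status.
 */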
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}

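/*
 * Tear down a completed (or dropped) buffer chain: unmap the skb, complete
 * it to mac80211 (or to the PAPRD calibration path), and return the ath_buf
 * entries to the free list.
 */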
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

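/*
 * Translate the hardware TX status into mac80211 rate-control feedback:
 * ACK signal strength, A-MPDU length/ack counts and per-rate retry counts.
 */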
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

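/*
 * Common completion handling for one unit of transmission (a single frame
 * or an aggregate). Called with the queue lock held; the lock is dropped
 * while the completion handlers run and re-acquired before the TID
 * scheduler is kicked.
 */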
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		/*
		 * This frame is sent out as a single frame.
		 * Use hardware retry status for this frame.
		 */
		if (ts->ts_status & ATH9K_TXERR_XRETRY)
			bf->bf_state.bf_type |= BUF_XRETRY;
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}

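/*
 * Reap completed descriptors from a legacy (non-EDMA) TX queue and feed
 * them to ath_tx_process_buffer().
 */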
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}

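/*
 * Watchdog for stuck TX queues: if a queue still has frames pending and no
 * completion progress has been seen since the last poll, the chip is
 * assumed to be hung and is reset.
 */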
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		spin_lock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, true);
		spin_unlock_bh(&sc->sc_pcu_lock);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

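/*
 * Completion processing for EDMA-capable hardware (the AR9003 family),
 * which reports TX status through a separate status ring rather than in
 * the transmit descriptors themselves.
 */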
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}