/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
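/*
 * Worked example (illustration only): 10 OFDM symbols take
 * SYMBOL_TIME(10) = 40 us with the regular 0.8 us guard interval, and
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us with the short
 * 0.4 us guard interval, i.e. 3.6 us per symbol rounded to whole
 * microseconds. The NUM_SYMBOLS_PER_USEC*() macros are the inverse
 * mappings, used for the MPDU density calculation further below.
 */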


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,  54 },		/* 0: BPSK */
	{    52, 108 },		/* 1: QPSK 1/2 */
	{    78, 162 },		/* 2: QPSK 3/4 */
	{   104, 216 },		/* 3: 16-QAM 1/2 */
	{   156, 324 },		/* 4: 16-QAM 3/4 */
	{   208, 432 },		/* 5: 64-QAM 2/3 */
	{   234, 486 },		/* 6: 64-QAM 3/4 */
	{   260, 540 },		/* 7: 64-QAM 5/6 */
};
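/*
 * bits_per_symbol[] holds the data bits carried by one OFDM symbol for a
 * single spatial stream at MCS 0-7, for 20 MHz and 40 MHz channels;
 * callers scale by the stream count (HT_RC_2_STREAMS) for higher MCS
 * indices. As an illustration, MCS 15 (two streams of the MCS 7
 * modulation) at 40 MHz carries 2 * 540 = 1080 bits per 4 us symbol,
 * which corresponds to the familiar 270 Mbit/s long-GI rate.
 */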

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		 3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		 3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
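/*
 * Each entry above is roughly the number of bytes that fit into a 4 ms
 * transmit duration at the corresponding MCS index, capped at 65532;
 * ath_lookup_rate() uses it to bound the aggregate size. As a sanity
 * check (illustration only): MCS 0 at HT20 with the long guard interval
 * is 6.5 Mbit/s, so 4 ms carries about 6.5e6 * 0.004 / 8 = 3250 bytes,
 * in line with the 3212-byte first entry (slightly lower, presumably to
 * allow for preamble/framing overhead).
 */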

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

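/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap of outstanding
 * subframes, indexed relative to tid->seq_start. ATH_BA_INDEX() gives the
 * offset of a sequence number from seq_start within the sequence space;
 * completing a subframe clears its bit and lets seq_start/baw_head slide
 * forward past any contiguous run of completed frames. For example
 * (illustration only), with seq_start = 100, finishing seqnos 100 and 101
 * advances seq_start to 102, where it waits for the still-pending subframe.
 */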
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

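/*
 * Completion handler for an A-MPDU: walk the chain of subframe buffers,
 * use the block-ack bitmap from the tx status to decide which subframes
 * were acked, complete those (updating the BAW), software-retry the rest
 * up to ATH_MAX_SW_RETRIES, and splice the retried subframes back onto
 * the head of the TID queue so transmit ordering is preserved.
 */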
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0,
								    !flush);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

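/*
 * Helper for ath_tx_form_aggr(): returns true if any rate in the frame's
 * rate series is a legacy (non-MCS) rate, in which case aggregate
 * formation is cut short.
 */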
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet at all.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

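/*
 * Worked example for the MPDU-density path below (illustration only,
 * assuming the 4-byte MPDU delimiter, i.e. ATH_AGGR_DELIM_SZ == 4):
 * with an 8 us density at MCS 7, HT20, long GI, nsymbols = 8 / 4 = 2 and
 * nsymbits = 260, so minlen = (2 * 260) / 8 = 65 bytes; a 40-byte
 * subframe then needs (65 - 40) / 4 = 6 extra delimiters if that exceeds
 * the length-based default.
 */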
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
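/*
 * Example (illustration only): a 1500-byte MPDU at MCS 7 (one stream,
 * 20 MHz, long GI) gives nbits = 1500 * 8 + 22 = 12022,
 * nsymbols = ceil(12022 / 260) = 47, so the data portion takes
 * SYMBOL_TIME(47) = 188 us, plus 36 us of preamble/training fields
 * (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)) = 224 us total.
 */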
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

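/*
 * Fill the hardware descriptors for a buffer chain: common fields
 * (flags, rate series, tx power, destination queue) are collected once
 * in a struct ath_tx_info, then each buffer in the (possibly aggregate)
 * chain gets its per-frame fields and a link to the next descriptor
 * before being handed to ath9k_hw_set_txdesc().
 */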
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

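/*
 * Pull frames off the TID queue and form aggregates until the queue is
 * empty, the block-ack window closes, or enough aggregates are already
 * pending in hardware (ATH_AGGR_MIN_QDEPTH).
 */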
Sujithe8324352009-01-16 21:38:42 +05301061static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1062 struct ath_atx_tid *tid)
1063{
Sujithd43f30152009-01-16 21:38:53 +05301064 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301065 enum ATH_AGGR_STATUS status;
Felix Fietkau399c6482011-09-14 21:24:17 +02001066 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +05301067 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001068 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +05301069
1070 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +02001071 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +05301072 return;
1073
1074 INIT_LIST_HEAD(&bf_q);
1075
Felix Fietkau269c44b2010-11-14 15:20:06 +01001076 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +05301077
1078 /*
Sujithd43f30152009-01-16 21:38:53 +05301079 * no frames picked up to be aggregated;
1080 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +05301081 */
1082 if (list_empty(&bf_q))
1083 break;
1084
1085 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +05301086 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Felix Fietkau399c6482011-09-14 21:24:17 +02001087 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +05301088
Felix Fietkau55195412011-04-17 23:28:09 +02001089 if (tid->ac->clear_ps_filter) {
1090 tid->ac->clear_ps_filter = false;
Felix Fietkau399c6482011-09-14 21:24:17 +02001091 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1092 } else {
1093 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
Felix Fietkau55195412011-04-17 23:28:09 +02001094 }
1095
Sujithd43f30152009-01-16 21:38:53 +05301096 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +01001097 if (bf == bf->bf_lastbf) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001098 aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1099 bf->bf_state.bf_type = BUF_AMPDU;
1100 } else {
1101 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +05301102 }
1103
Felix Fietkau493cf042011-09-14 21:24:22 +02001104 ath_tx_fill_desc(sc, bf, txq, aggr_len);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001105 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001106 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +05301107 status != ATH_AGGR_BAW_CLOSED);
1108}
1109
Felix Fietkau231c3a12010-09-20 19:35:28 +02001110int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1111 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301112{
1113 struct ath_atx_tid *txtid;
1114 struct ath_node *an;
1115
1116 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301117 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001118
1119 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1120 return -EAGAIN;
1121
Sujithf83da962009-07-23 15:32:37 +05301122 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001123 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001124 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001125
Felix Fietkau2ed72222011-01-10 17:05:49 -07001126 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1127 txtid->baw_head = txtid->baw_tail = 0;
1128
Felix Fietkau231c3a12010-09-20 19:35:28 +02001129 return 0;
Sujithe8324352009-01-16 21:38:42 +05301130}
1131
Sujithf83da962009-07-23 15:32:37 +05301132void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +05301133{
1134 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1135 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +01001136 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +05301137
1138 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +05301139 return;
Sujithe8324352009-01-16 21:38:42 +05301140
1141 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +05301142 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +05301143 return;
Sujithe8324352009-01-16 21:38:42 +05301144 }
1145
Sujithe8324352009-01-16 21:38:42 +05301146 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001147 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +02001148
1149 /*
1150 * If frames are still being transmitted for this TID, they will be
1151 * cleaned up during tx completion. To prevent race conditions, this
1152 * TID can only be reused after all in-progress subframes have been
1153 * completed.
1154 */
1155 if (txtid->baw_head != txtid->baw_tail)
1156 txtid->state |= AGGR_CLEANUP;
1157 else
1158 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +05301159 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301160
Felix Fietkau90fa5392010-09-20 13:45:38 +02001161 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301162}
1163
Johannes Berg042ec452011-09-29 16:04:26 +02001164void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1165 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001166{
1167 struct ath_atx_tid *tid;
1168 struct ath_atx_ac *ac;
1169 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001170 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001171 int tidno;
1172
1173 for (tidno = 0, tid = &an->tid[tidno];
1174 tidno < WME_NUM_TID; tidno++, tid++) {
1175
1176 if (!tid->sched)
1177 continue;
1178
1179 ac = tid->ac;
1180 txq = ac->txq;
1181
1182 spin_lock_bh(&txq->axq_lock);
1183
Johannes Berg042ec452011-09-29 16:04:26 +02001184 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001185
1186 tid->sched = false;
1187 list_del(&tid->list);
1188
1189 if (ac->sched) {
1190 ac->sched = false;
1191 list_del(&ac->list);
1192 }
1193
1194 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001195
Johannes Berg042ec452011-09-29 16:04:26 +02001196 ieee80211_sta_set_buffered(sta, tidno, buffered);
1197 }
Felix Fietkau55195412011-04-17 23:28:09 +02001198}
1199
1200void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1201{
1202 struct ath_atx_tid *tid;
1203 struct ath_atx_ac *ac;
1204 struct ath_txq *txq;
1205 int tidno;
1206
1207 for (tidno = 0, tid = &an->tid[tidno];
1208 tidno < WME_NUM_TID; tidno++, tid++) {
1209
1210 ac = tid->ac;
1211 txq = ac->txq;
1212
1213 spin_lock_bh(&txq->axq_lock);
1214 ac->clear_ps_filter = true;
1215
Felix Fietkau56dc6332011-08-28 00:32:22 +02001216 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001217 ath_tx_queue_tid(txq, tid);
1218 ath_txq_schedule(sc, txq);
1219 }
1220
1221 spin_unlock_bh(&txq->axq_lock);
1222 }
1223}
1224
Sujithe8324352009-01-16 21:38:42 +05301225void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1226{
1227 struct ath_atx_tid *txtid;
1228 struct ath_node *an;
1229
1230 an = (struct ath_node *)sta->drv_priv;
1231
1232 if (sc->sc_flags & SC_OP_TXAGGR) {
1233 txtid = ATH_AN_2_TID(an, tid);
1234 txtid->baw_size =
1235 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1236 txtid->state |= AGGR_ADDBA_COMPLETE;
1237 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1238 ath_tx_resume_tid(sc, txtid);
1239 }
1240}
1241
Sujithe8324352009-01-16 21:38:42 +05301242/********************/
1243/* Queue Management */
1244/********************/
1245
Sujithe8324352009-01-16 21:38:42 +05301246static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1247 struct ath_txq *txq)
1248{
1249 struct ath_atx_ac *ac, *ac_tmp;
1250 struct ath_atx_tid *tid, *tid_tmp;
1251
1252 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1253 list_del(&ac->list);
1254 ac->sched = false;
1255 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1256 list_del(&tid->list);
1257 tid->sched = false;
1258 ath_tid_drain(sc, txq, tid);
1259 }
1260 }
1261}
1262
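/*
 * Set up a hardware tx queue of the given type and initialize the
 * matching software queue state (lists, lock, FIFO indices).
 */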
1263struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1264{
Sujithcbe61d82009-02-09 13:27:12 +05301265 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301266 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001267 static const int subtype_txq_to_hwq[] = {
1268 [WME_AC_BE] = ATH_TXQ_AC_BE,
1269 [WME_AC_BK] = ATH_TXQ_AC_BK,
1270 [WME_AC_VI] = ATH_TXQ_AC_VI,
1271 [WME_AC_VO] = ATH_TXQ_AC_VO,
1272 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001273 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301274
1275 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001276 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301277 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1278 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1279 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1280 qi.tqi_physCompBuf = 0;
1281
1282 /*
1283 * Enable interrupts only for EOL and DESC conditions.
1284 * We mark tx descriptors to receive a DESC interrupt
 1285 * when a tx queue gets deep; otherwise we wait for the
 1286 * EOL interrupt to reap descriptors. Note that this is done to
1287 * reduce interrupt load and this only defers reaping
1288 * descriptors, never transmitting frames. Aside from
1289 * reducing interrupts this also permits more concurrency.
1290 * The only potential downside is if the tx queue backs
 1291 * up, in which case the top half of the kernel may back up
1292 * due to a lack of tx descriptors.
1293 *
1294 * The UAPSD queue is an exception, since we take a desc-
1295 * based intr on the EOSP frames.
1296 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001297 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1298 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1299 TXQ_FLAG_TXERRINT_ENABLE;
1300 } else {
1301 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1302 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1303 else
1304 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1305 TXQ_FLAG_TXDESCINT_ENABLE;
1306 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001307 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1308 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301309 /*
1310 * NB: don't print a message, this happens
1311 * normally on parts with too few tx queues
1312 */
1313 return NULL;
1314 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001315 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1316 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301317
Ben Greear60f2d1d2011-01-09 23:11:52 -08001318 txq->axq_qnum = axq_qnum;
1319 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301320 txq->axq_link = NULL;
1321 INIT_LIST_HEAD(&txq->axq_q);
1322 INIT_LIST_HEAD(&txq->axq_acq);
1323 spin_lock_init(&txq->axq_lock);
1324 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001325 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001326 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001327 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001328
1329 txq->txq_headidx = txq->txq_tailidx = 0;
1330 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1331 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301332 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001333 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301334}
1335
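/*
 * Apply updated queue parameters to a hardware tx queue; for the
 * beacon queue the parameters are only cached for ath_beaconq_config.
 */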
Sujithe8324352009-01-16 21:38:42 +05301336int ath_txq_update(struct ath_softc *sc, int qnum,
1337 struct ath9k_tx_queue_info *qinfo)
1338{
Sujithcbe61d82009-02-09 13:27:12 +05301339 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301340 int error = 0;
1341 struct ath9k_tx_queue_info qi;
1342
1343 if (qnum == sc->beacon.beaconq) {
1344 /*
1345 * XXX: for beacon queue, we just save the parameter.
1346 * It will be picked up by ath_beaconq_config when
1347 * it's necessary.
1348 */
1349 sc->beacon.beacon_qi = *qinfo;
1350 return 0;
1351 }
1352
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001353 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301354
1355 ath9k_hw_get_txq_props(ah, qnum, &qi);
1356 qi.tqi_aifs = qinfo->tqi_aifs;
1357 qi.tqi_cwmin = qinfo->tqi_cwmin;
1358 qi.tqi_cwmax = qinfo->tqi_cwmax;
1359 qi.tqi_burstTime = qinfo->tqi_burstTime;
1360 qi.tqi_readyTime = qinfo->tqi_readyTime;
1361
1362 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001363 ath_err(ath9k_hw_common(sc->sc_ah),
1364 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301365 error = -EIO;
1366 } else {
1367 ath9k_hw_resettxqueue(ah, qnum);
1368 }
1369
1370 return error;
1371}
1372
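/*
 * Recompute the CAB queue ready time as a percentage of the beacon
 * interval, clamped to the allowed bounds.
 */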
1373int ath_cabq_update(struct ath_softc *sc)
1374{
1375 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001376 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301377 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301378
1379 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1380 /*
1381 * Ensure the readytime % is within the bounds.
1382 */
Sujith17d79042009-02-09 13:27:03 +05301383 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1384 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1385 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1386 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301387
Steve Brown9814f6b2011-02-07 17:10:39 -07001388 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301389 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301390 ath_txq_update(sc, qnum, &qi);
1391
1392 return 0;
1393}
1394
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001395static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1396{
1397 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1398 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1399}
1400
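/*
 * Complete (or drop) every buffer on the given descriptor list; the
 * queue lock is released around each completion call.
 */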
Felix Fietkaufce041b2011-05-19 12:20:25 +02001401static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1402 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301403 __releases(txq->axq_lock)
1404 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301405{
1406 struct ath_buf *bf, *lastbf;
1407 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001408 struct ath_tx_status ts;
1409
1410 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001411 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301412 INIT_LIST_HEAD(&bf_head);
1413
Felix Fietkaufce041b2011-05-19 12:20:25 +02001414 while (!list_empty(list)) {
1415 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301416
Felix Fietkaufce041b2011-05-19 12:20:25 +02001417 if (bf->bf_stale) {
1418 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301419
Felix Fietkaufce041b2011-05-19 12:20:25 +02001420 ath_tx_return_buffer(sc, bf);
1421 continue;
Sujithe8324352009-01-16 21:38:42 +05301422 }
1423
1424 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001425 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001426
Sujithe8324352009-01-16 21:38:42 +05301427 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001428 if (bf_is_ampdu_not_probing(bf))
1429 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301430
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301432 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001433 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1434 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301435 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001436 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001437 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001438 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001439}
1440
1441/*
1442 * Drain a given TX queue (could be Beacon or Data)
1443 *
1444 * This assumes output has been stopped and
1445 * we do not need to block ath_tx_tasklet.
1446 */
1447void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1448{
1449 spin_lock_bh(&txq->axq_lock);
1450 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1451 int idx = txq->txq_tailidx;
1452
1453 while (!list_empty(&txq->txq_fifo[idx])) {
1454 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1455 retry_tx);
1456
1457 INCR(idx, ATH_TXFIFO_DEPTH);
1458 }
1459 txq->txq_tailidx = idx;
1460 }
1461
1462 txq->axq_link = NULL;
1463 txq->axq_tx_inprogress = false;
1464 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001465
1466 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001467 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1468 ath_txq_drain_pending_buffers(sc, txq);
1469
1470 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301471}
1472
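/*
 * Stop TX DMA on all hardware queues and drain them; returns false if
 * any queue still had pending descriptors.
 */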
Felix Fietkau080e1a22010-12-05 20:17:53 +01001473bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301474{
Sujithcbe61d82009-02-09 13:27:12 +05301475 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001476 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301477 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001478 int i;
1479 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301480
1481 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001482 return true;
Sujith043a0402009-01-16 21:38:47 +05301483
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001484 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301485
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001486 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301487 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001488 if (!ATH_TXQ_SETUP(sc, i))
1489 continue;
1490
Felix Fietkau34d25812011-10-07 02:28:12 +02001491 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1492 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301493 }
1494
Felix Fietkau080e1a22010-12-05 20:17:53 +01001495 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001496 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301497
1498 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001499 if (!ATH_TXQ_SETUP(sc, i))
1500 continue;
1501
1502 /*
1503 * The caller will resume queues with ieee80211_wake_queues.
1504 * Mark the queue as not stopped to prevent ath_tx_complete
1505 * from waking the queue too early.
1506 */
1507 txq = &sc->tx.txq[i];
1508 txq->stopped = false;
1509 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301510 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001511
1512 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301513}
1514
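/* Release a hardware tx queue and clear its setup flag. */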
Sujithe8324352009-01-16 21:38:42 +05301515void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1516{
1517 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1518 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1519}
1520
Ben Greear7755bad2011-01-18 17:30:00 -08001521/* For each axq_acq entry, for each tid, try to schedule packets
 1522 * for transmission until ampdu_depth has reached the minimum queue depth.
1523 */
Sujithe8324352009-01-16 21:38:42 +05301524void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1525{
Ben Greear7755bad2011-01-18 17:30:00 -08001526 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1527 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301528
Felix Fietkau236de512011-09-03 01:40:25 +02001529 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001530 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301531 return;
1532
1533 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001534 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301535
Ben Greear7755bad2011-01-18 17:30:00 -08001536 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1537 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1538 list_del(&ac->list);
1539 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301540
Ben Greear7755bad2011-01-18 17:30:00 -08001541 while (!list_empty(&ac->tid_q)) {
1542 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1543 list);
1544 list_del(&tid->list);
1545 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301546
Ben Greear7755bad2011-01-18 17:30:00 -08001547 if (tid->paused)
1548 continue;
Sujithe8324352009-01-16 21:38:42 +05301549
Ben Greear7755bad2011-01-18 17:30:00 -08001550 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301551
Ben Greear7755bad2011-01-18 17:30:00 -08001552 /*
1553 * add tid to round-robin queue if more frames
1554 * are pending for the tid
1555 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001556 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001557 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301558
Ben Greear7755bad2011-01-18 17:30:00 -08001559 if (tid == last_tid ||
1560 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1561 break;
Sujithe8324352009-01-16 21:38:42 +05301562 }
Ben Greear7755bad2011-01-18 17:30:00 -08001563
1564 if (!list_empty(&ac->tid_q)) {
1565 if (!ac->sched) {
1566 ac->sched = true;
1567 list_add_tail(&ac->list, &txq->axq_acq);
1568 }
1569 }
1570
1571 if (ac == last_ac ||
1572 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1573 return;
Sujithe8324352009-01-16 21:38:42 +05301574 }
1575}
1576
Sujithe8324352009-01-16 21:38:42 +05301577/***********/
1578/* TX, DMA */
1579/***********/
1580
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001582 * Insert a chain of ath_buf (descriptors) on a txq and
 1583 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584 */
Sujith102e0572008-10-29 10:15:16 +05301585static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001586 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001587{
Sujithcbe61d82009-02-09 13:27:12 +05301588 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001589 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001590 struct ath_buf *bf, *bf_last;
1591 bool puttxbuf = false;
1592 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301593
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594 /*
1595 * Insert the frame on the outbound list and
1596 * pass it on to the hardware.
1597 */
1598
1599 if (list_empty(head))
1600 return;
1601
Felix Fietkaufce041b2011-05-19 12:20:25 +02001602 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001603 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001604 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001605
Joe Perches226afe62010-12-02 19:12:37 -08001606 ath_dbg(common, ATH_DBG_QUEUE,
1607 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001608
Felix Fietkaufce041b2011-05-19 12:20:25 +02001609 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1610 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001611 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001612 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001613 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001614 list_splice_tail_init(head, &txq->axq_q);
1615
Felix Fietkaufce041b2011-05-19 12:20:25 +02001616 if (txq->axq_link) {
1617 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001618 ath_dbg(common, ATH_DBG_XMIT,
1619 "link[%u] (%p)=%llx (%p)\n",
1620 txq->axq_qnum, txq->axq_link,
1621 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001622 } else if (!edma)
1623 puttxbuf = true;
1624
1625 txq->axq_link = bf_last->bf_desc;
1626 }
1627
1628 if (puttxbuf) {
1629 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1630 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1631 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1632 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1633 }
1634
1635 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001636 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001637 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001639
1640 if (!internal) {
1641 txq->axq_depth++;
1642 if (bf_is_ampdu_not_probing(bf))
1643 txq->axq_ampdu_depth++;
1644 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001645}
1646
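/*
 * Handle a frame destined for an aggregation-enabled TID: buffer it in
 * the software queue when it cannot go out immediately, otherwise send
 * it to the hardware and add it to the block-ack window.
 */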
Sujithe8324352009-01-16 21:38:42 +05301647static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001648 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301649{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001650 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001651 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001652 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301653
1654 /*
1655 * Do not queue to h/w when any of the following conditions is true:
1656 * - there are pending frames in software queue
1657 * - the TID is currently paused for ADDBA/BAR request
1658 * - seqno is not within block-ack window
1659 * - h/w queue depth exceeds low water mark
1660 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001661 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001662 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001663 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001664 /*
Sujithe8324352009-01-16 21:38:42 +05301665		 * Add this frame to the software queue so it can be scheduled
	1666		 * later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001667 */
Ben Greearbda8add2011-01-09 23:11:48 -08001668 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001669 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001670 if (!txctl->an || !txctl->an->sleeping)
1671 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301672 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001673 }
1674
Felix Fietkau44f1d262011-08-28 00:32:25 +02001675 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1676 if (!bf)
1677 return;
1678
Felix Fietkau399c6482011-09-14 21:24:17 +02001679 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001680 INIT_LIST_HEAD(&bf_head);
1681 list_add(&bf->list, &bf_head);
1682
Sujithe8324352009-01-16 21:38:42 +05301683 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001684 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301685
1686 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001687 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301688 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001689 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001690 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301691}
1692
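/*
 * Transmit a frame without aggregation: build the descriptor and queue
 * it directly to the hardware.
 */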
Felix Fietkau82b873a2010-11-11 03:18:37 +01001693static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001694 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001695{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001696 struct ath_frame_info *fi = get_frame_info(skb);
1697 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301698 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001699
Felix Fietkau44f1d262011-08-28 00:32:25 +02001700 bf = fi->bf;
1701 if (!bf)
1702 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1703
1704 if (!bf)
1705 return;
1706
1707 INIT_LIST_HEAD(&bf_head);
1708 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001709 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301710
1711 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001712 if (tid)
1713 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301714
Sujithd43f30152009-01-16 21:38:53 +05301715 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001716 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001717 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301718 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001719}
1720
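/*
 * Populate the per-frame ath_frame_info (key index, key type, frame
 * length) from the mac80211 tx control data.
 */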
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001721static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1722 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301723{
1724 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001725 struct ieee80211_sta *sta = tx_info->control.sta;
1726 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001727 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001728 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001729 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301731
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001732 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301733
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001734 if (sta)
1735 an = (struct ath_node *) sta->drv_priv;
1736
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001737 memset(fi, 0, sizeof(*fi));
1738 if (hw_key)
1739 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001740 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1741 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001742 else
1743 fi->keyix = ATH9K_TXKEYIX_INVALID;
1744 fi->keytype = keytype;
1745 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301746}
1747
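/*
 * Reduce a 3-chain tx chainmask to two chains for low rate codes on
 * 5 GHz channels when the hardware has the APM capability.
 */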
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301748u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1749{
1750 struct ath_hw *ah = sc->sc_ah;
1751 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301752 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1753 (curchan->channelFlags & CHANNEL_5GHZ) &&
1754 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301755 return 0x3;
1756 else
1757 return chainmask;
1758}
1759
Felix Fietkau44f1d262011-08-28 00:32:25 +02001760/*
	1761 * Assign a descriptor (and sequence number if necessary),
	1762 * and map the buffer for DMA. Frees the skb on error.
1763 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001764static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001765 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001766 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001767 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301768{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001769 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001770 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001772 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001773 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001774
1775 bf = ath_tx_get_buffer(sc);
1776 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001777 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001778 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001779 }
Sujithe8324352009-01-16 21:38:42 +05301780
Sujithe8324352009-01-16 21:38:42 +05301781 ATH_TXBUF_RESET(bf);
1782
Felix Fietkaufa05f872011-08-28 00:32:24 +02001783 if (tid) {
1784 seqno = tid->seq_next;
1785 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1786 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1787 bf->bf_state.seqno = seqno;
1788 }
1789
Sujithe8324352009-01-16 21:38:42 +05301790 bf->bf_mpdu = skb;
1791
Ben Greearc1739eb32010-10-14 12:45:29 -07001792 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1793 skb->len, DMA_TO_DEVICE);
1794 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301795 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001796 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001797 ath_err(ath9k_hw_common(sc->sc_ah),
1798 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001799 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001800 goto error;
Sujithe8324352009-01-16 21:38:42 +05301801 }
1802
Felix Fietkau56dc6332011-08-28 00:32:22 +02001803 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001804
1805 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001806
1807error:
1808 dev_kfree_skb_any(skb);
1809 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001810}
1811
1812/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001813static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001814 struct ath_tx_control *txctl)
1815{
Felix Fietkau04caf862010-11-14 15:20:12 +01001816 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1817 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001818 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001819 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001820 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301821
Sujithe8324352009-01-16 21:38:42 +05301822 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301823 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1824 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001825 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1826 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001827 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001828
Felix Fietkau066dae92010-11-07 14:59:39 +01001829 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001830 }
1831
1832 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001833 /*
1834 * Try aggregation if it's a unicast data frame
1835 * and the destination is HT capable.
1836 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001837 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301838 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001839 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1840 if (!bf)
1841 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001842
Felix Fietkau82b873a2010-11-11 03:18:37 +01001843 bf->bf_state.bfs_paprd = txctl->paprd;
1844
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301845 if (txctl->paprd)
1846 bf->bf_state.bfs_paprd_timestamp = jiffies;
1847
Felix Fietkau44f1d262011-08-28 00:32:25 +02001848 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301849 }
1850
Felix Fietkaufa05f872011-08-28 00:32:24 +02001851out:
Sujithe8324352009-01-16 21:38:42 +05301852 spin_unlock_bh(&txctl->txq->axq_lock);
1853}
1854
1855/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001856int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301857 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001858{
Felix Fietkau28d16702010-11-14 15:20:10 +01001859 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1860 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001861 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001862 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001863 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001864 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001865 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001866 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001867 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001868
Ben Greeara9927ba2010-12-06 21:13:49 -08001869 /* NOTE: sta can be NULL according to net/mac80211.h */
1870 if (sta)
1871 txctl->an = (struct ath_node *)sta->drv_priv;
1872
Felix Fietkau04caf862010-11-14 15:20:12 +01001873 if (info->control.hw_key)
1874 frmlen += info->control.hw_key->icv_len;
1875
Felix Fietkau28d16702010-11-14 15:20:10 +01001876 /*
1877 * As a temporary workaround, assign seq# here; this will likely need
1878 * to be cleaned up to work better with Beacon transmission and virtual
1879 * BSSes.
1880 */
1881 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1882 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1883 sc->tx.seq_no += 0x10;
1884 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1885 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1886 }
1887
John W. Linville42cecc32011-09-19 15:42:31 -04001888 /* Add the padding after the header if this is not already done */
1889 padpos = ath9k_cmn_padpos(hdr->frame_control);
1890 padsize = padpos & 3;
1891 if (padsize && skb->len > padpos) {
1892 if (skb_headroom(skb) < padsize)
1893 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001894
John W. Linville42cecc32011-09-19 15:42:31 -04001895 skb_push(skb, padsize);
1896 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001897 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001898 }
1899
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001900 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1901 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1902 !ieee80211_is_data(hdr->frame_control))
1903 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1904
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001905 setup_frame_info(hw, skb, frmlen);
1906
1907 /*
1908 * At this point, the vif, hw_key and sta pointers in the tx control
	1909	 * info are no longer valid (overwritten by the ath_frame_info data).
1910 */
1911
Felix Fietkau066dae92010-11-07 14:59:39 +01001912 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001913 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001914 if (txq == sc->tx.txq_map[q] &&
1915 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001916 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001917 txq->stopped = 1;
1918 }
1919 spin_unlock_bh(&txq->axq_lock);
1920
Felix Fietkau44f1d262011-08-28 00:32:25 +02001921 ath_tx_start_dma(sc, skb, txctl);
1922 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001923}
1924
Sujithe8324352009-01-16 21:38:42 +05301925/*****************/
1926/* TX Completion */
1927/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001928
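/*
 * Hand a completed frame back to mac80211: strip the header padding,
 * update the status flags and wake the queue if it was stopped.
 */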
Sujithe8324352009-01-16 21:38:42 +05301929static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301930 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001931{
Sujithe8324352009-01-16 21:38:42 +05301932 struct ieee80211_hw *hw = sc->hw;
1933 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001934 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001935 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001936 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301937
Joe Perches226afe62010-12-02 19:12:37 -08001938 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301939
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301940 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301941 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301942
Felix Fietkau55797b12011-09-14 21:24:16 +02001943 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301944 /* Frame was ACKed */
1945 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301946
John W. Linville42cecc32011-09-19 15:42:31 -04001947 padpos = ath9k_cmn_padpos(hdr->frame_control);
1948 padsize = padpos & 3;
1949 if (padsize && skb->len>padpos+padsize) {
1950 /*
1951 * Remove MAC header padding before giving the frame back to
1952 * mac80211.
1953 */
1954 memmove(skb->data + padsize, skb->data, padpos);
1955 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301956 }
1957
Sujith1b04b932010-01-08 10:36:05 +05301958 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1959 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001960 ath_dbg(common, ATH_DBG_PS,
1961 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301962 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1963 PS_WAIT_FOR_CAB |
1964 PS_WAIT_FOR_PSPOLL_DATA |
1965 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001966 }
1967
Felix Fietkau7545daf2011-01-24 19:23:16 +01001968 q = skb_get_queue_mapping(skb);
1969 if (txq == sc->tx.txq_map[q]) {
1970 spin_lock_bh(&txq->axq_lock);
1971 if (WARN_ON(--txq->pending_frames < 0))
1972 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001973
Felix Fietkau7545daf2011-01-24 19:23:16 +01001974 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1975 ieee80211_wake_queue(sc->hw, q);
1976 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001977 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001978 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001979 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001980
1981 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301982}
1983
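/*
 * Unmap a completed buffer, pass its skb up the completion path and
 * return the ath_buf list to the free queue.
 */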
1984static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001985 struct ath_txq *txq, struct list_head *bf_q,
1986 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301987{
1988 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001989 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301990 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301991 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301992
Sujithe8324352009-01-16 21:38:42 +05301993 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301994 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301995
Felix Fietkau55797b12011-09-14 21:24:16 +02001996 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301997 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301998
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001999 if (ts->ts_status & ATH9K_TXERR_FILT)
2000 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2001
Ben Greearc1739eb32010-10-14 12:45:29 -07002002 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002003 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002004
2005 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302006 if (time_after(jiffies,
2007 bf->bf_state.bfs_paprd_timestamp +
2008 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002009 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002010 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002011 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002012 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002013 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302014 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002015 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002016 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2017 * accidentally reference it later.
2018 */
2019 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302020
2021 /*
	2022	 * Return the list of ath_buf of this mpdu to the free queue
2023 */
2024 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2025 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2026 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2027}
2028
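/*
 * Translate the hardware tx status into mac80211 rate-control feedback
 * (ack signal, A-MPDU counts, per-rate retry counts).
 */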
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002029static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2030 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002031 int txok)
Sujithc4288392008-11-18 09:09:30 +05302032{
Sujitha22be222009-03-30 15:28:36 +05302033 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302034 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302035 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002036 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002037 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302038 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302039
Sujith95e4acb2009-03-13 08:56:09 +05302040 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002041 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302042
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302044 WARN_ON(tx_rateindex >= hw->max_rates);
2045
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002046 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002047 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302048
Felix Fietkaub572d032010-11-14 15:20:07 +01002049 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002050 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302051 tx_info->status.ampdu_len = nframes;
2052 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002053
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002054 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002055 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002056 /*
	2057		 * If an underrun error is seen, treat it as an excessive
	2058		 * retry only if the max frame trigger level has been reached
2059 * (2 KB for single stream, and 4 KB for dual stream).
2060 * Adjust the long retry as if the frame was tried
2061 * hw->max_rate_tries times to affect how rate control updates
2062 * PER for the failed rate.
	2063		 * In case of congestion on the bus, penalizing this type of
	2064		 * underrun should help the hardware actually transmit new frames
2065 * successfully by eventually preferring slower rates.
2066 * This itself should also alleviate congestion on the bus.
2067 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002068 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2069 ATH9K_TX_DELIM_UNDERRUN)) &&
2070 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002071 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002072 tx_info->status.rates[tx_rateindex].count =
2073 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302074 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302075
Felix Fietkau545750d2009-11-23 22:21:01 +01002076 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302077 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002078 tx_info->status.rates[i].idx = -1;
2079 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302080
Felix Fietkau78c46532010-06-25 01:26:16 +02002081 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302082}
2083
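/*
 * Common completion handling for a reaped buffer: update the queue
 * depth counters, then complete it through the aggregate or the
 * single-frame path and re-schedule the queue.
 */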
Felix Fietkaufce041b2011-05-19 12:20:25 +02002084static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2085 struct ath_tx_status *ts, struct ath_buf *bf,
2086 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302087 __releases(txq->axq_lock)
2088 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002089{
2090 int txok;
2091
2092 txq->axq_depth--;
2093 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2094 txq->axq_tx_inprogress = false;
2095 if (bf_is_ampdu_not_probing(bf))
2096 txq->axq_ampdu_depth--;
2097
2098 spin_unlock_bh(&txq->axq_lock);
2099
2100 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002101 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002102 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2103 } else
2104 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2105
2106 spin_lock_bh(&txq->axq_lock);
2107
2108 if (sc->sc_flags & SC_OP_TXAGGR)
2109 ath_txq_schedule(sc, txq);
2110}
2111
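/*
 * Reap completed descriptors from a legacy (non-EDMA) tx queue and
 * hand the buffers to the completion path.
 */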
Sujithc4288392008-11-18 09:09:30 +05302112static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113{
Sujithcbe61d82009-02-09 13:27:12 +05302114 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002115 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2117 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302118 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002119 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 int status;
2121
Joe Perches226afe62010-12-02 19:12:37 -08002122 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2123 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2124 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002125
Felix Fietkaufce041b2011-05-19 12:20:25 +02002126 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002128 if (work_pending(&sc->hw_reset_work))
2129 break;
2130
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 if (list_empty(&txq->axq_q)) {
2132 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002133 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002134 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 break;
2136 }
2137 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2138
2139 /*
	2140		 * There is a race condition in which a BH gets scheduled
	2141		 * after sw writes TxE and before hw re-loads the last
2142 * descriptor to get the newly chained one.
2143 * Software must keep the last DONE descriptor as a
2144 * holding descriptor - software does so by marking
2145 * it with the STALE flag.
2146 */
2147 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302148 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002150 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002152
2153 bf = list_entry(bf_held->list.next, struct ath_buf,
2154 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 }
2156
2157 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302158 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159
Felix Fietkau29bffa92010-03-29 20:14:23 -07002160 memset(&ts, 0, sizeof(ts));
2161 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002162 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002164
Ben Greear2dac4fb2011-01-09 23:11:45 -08002165 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166
2167 /*
2168 * Remove ath_buf's of the same transmit unit from txq,
	2169		 * but leave the last descriptor behind as the holding
2170 * descriptor for hw.
2171 */
Sujitha119cc42009-03-30 15:28:38 +05302172 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 if (!list_is_singular(&lastbf->list))
2175 list_cut_position(&bf_head,
2176 &txq->axq_q, lastbf->list.prev);
2177
Felix Fietkaufce041b2011-05-19 12:20:25 +02002178 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002179 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002180 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 }
Johannes Berge6a98542008-10-21 12:40:02 +02002182
Felix Fietkaufce041b2011-05-19 12:20:25 +02002183 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002185 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186}
2187
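/*
 * Watchdog worker: if a queue has stayed non-empty with no tx
 * completion progress since the previous poll, request a chip reset.
 */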
Sujith305fe472009-07-23 15:32:29 +05302188static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002189{
2190 struct ath_softc *sc = container_of(work, struct ath_softc,
2191 tx_complete_work.work);
2192 struct ath_txq *txq;
2193 int i;
2194 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002195#ifdef CONFIG_ATH9K_DEBUGFS
2196 sc->tx_complete_poll_work_seen++;
2197#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002198
2199 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2200 if (ATH_TXQ_SETUP(sc, i)) {
2201 txq = &sc->tx.txq[i];
2202 spin_lock_bh(&txq->axq_lock);
2203 if (txq->axq_depth) {
2204 if (txq->axq_tx_inprogress) {
2205 needreset = true;
2206 spin_unlock_bh(&txq->axq_lock);
2207 break;
2208 } else {
2209 txq->axq_tx_inprogress = true;
2210 }
2211 }
2212 spin_unlock_bh(&txq->axq_lock);
2213 }
2214
2215 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002216 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2217 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002218 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002219 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002220 }
2221
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002222 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002223 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2224}
2225
2226
Sujithe8324352009-01-16 21:38:42 +05302227
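/* Process the tx queues the hardware reports as having completed frames. */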
2228void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002229{
Sujithe8324352009-01-16 21:38:42 +05302230 int i;
2231 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002232
Sujithe8324352009-01-16 21:38:42 +05302233 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002234
2235 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302236 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2237 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002238 }
2239}
2240
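/*
 * Reap completed frames via the EDMA tx status ring and complete them
 * per queue.
 */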
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002241void ath_tx_edma_tasklet(struct ath_softc *sc)
2242{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002243 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002244 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2245 struct ath_hw *ah = sc->sc_ah;
2246 struct ath_txq *txq;
2247 struct ath_buf *bf, *lastbf;
2248 struct list_head bf_head;
2249 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002250
2251 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002252 if (work_pending(&sc->hw_reset_work))
2253 break;
2254
Felix Fietkaufce041b2011-05-19 12:20:25 +02002255 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 if (status == -EINPROGRESS)
2257 break;
2258 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002259 ath_dbg(common, ATH_DBG_XMIT,
2260 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261 break;
2262 }
2263
2264 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002265 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002266 continue;
2267
Felix Fietkaufce041b2011-05-19 12:20:25 +02002268 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002269
2270 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002271
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002272 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2273 spin_unlock_bh(&txq->axq_lock);
2274 return;
2275 }
2276
2277 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2278 struct ath_buf, list);
2279 lastbf = bf->bf_lastbf;
2280
2281 INIT_LIST_HEAD(&bf_head);
2282 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2283 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284
Felix Fietkaufce041b2011-05-19 12:20:25 +02002285 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2286 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287
Felix Fietkaufce041b2011-05-19 12:20:25 +02002288 if (!list_empty(&txq->axq_q)) {
2289 struct list_head bf_q;
2290
2291 INIT_LIST_HEAD(&bf_q);
2292 txq->axq_link = NULL;
2293 list_splice_tail_init(&txq->axq_q, &bf_q);
2294 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2295 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002296 }
2297
Felix Fietkaufce041b2011-05-19 12:20:25 +02002298 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002299 spin_unlock_bh(&txq->axq_lock);
2300 }
2301}
2302
Sujithe8324352009-01-16 21:38:42 +05302303/*****************/
2304/* Init, Cleanup */
2305/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002306
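/* Allocate the DMA-coherent ring used for EDMA tx status reporting. */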
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002307static int ath_txstatus_setup(struct ath_softc *sc, int size)
2308{
2309 struct ath_descdma *dd = &sc->txsdma;
2310 u8 txs_len = sc->sc_ah->caps.txs_len;
2311
2312 dd->dd_desc_len = size * txs_len;
2313 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2314 &dd->dd_desc_paddr, GFP_KERNEL);
2315 if (!dd->dd_desc)
2316 return -ENOMEM;
2317
2318 return 0;
2319}
2320
2321static int ath_tx_edma_init(struct ath_softc *sc)
2322{
2323 int err;
2324
2325 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2326 if (!err)
2327 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2328 sc->txsdma.dd_desc_paddr,
2329 ATH_TXSTATUS_RING_SIZE);
2330
2331 return err;
2332}
2333
2334static void ath_tx_edma_cleanup(struct ath_softc *sc)
2335{
2336 struct ath_descdma *dd = &sc->txsdma;
2337
2338 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2339 dd->dd_desc_paddr);
2340}
2341
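/*
 * Allocate the tx and beacon descriptor buffers and, on EDMA hardware,
 * the tx status ring.
 */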
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342int ath_tx_init(struct ath_softc *sc, int nbufs)
2343{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002344 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345 int error = 0;
2346
Sujith797fe5cb2009-03-30 15:28:45 +05302347 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348
Sujith797fe5cb2009-03-30 15:28:45 +05302349 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002350 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302351 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002352 ath_err(common,
2353 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 goto err;
2355 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356
Sujith797fe5cb2009-03-30 15:28:45 +05302357 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002358 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302359 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002360 ath_err(common,
2361 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302362 goto err;
2363 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002365 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2366
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002367 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2368 error = ath_tx_edma_init(sc);
2369 if (error)
2370 goto err;
2371 }
2372
Sujith797fe5cb2009-03-30 15:28:45 +05302373err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374 if (error != 0)
2375 ath_tx_cleanup(sc);
2376
2377 return error;
2378}
2379
Sujith797fe5cb2009-03-30 15:28:45 +05302380void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381{
Sujithb77f4832008-12-07 21:44:03 +05302382 if (sc->beacon.bdma.dd_desc_len != 0)
2383 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Sujithb77f4832008-12-07 21:44:03 +05302385 if (sc->tx.txdma.dd_desc_len != 0)
2386 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002387
2388 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2389 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390}
2391
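/* Initialize the per-station TID and access-category state used for aggregation. */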
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2393{
Sujithc5170162008-10-29 10:13:59 +05302394 struct ath_atx_tid *tid;
2395 struct ath_atx_ac *ac;
2396 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397
Sujith8ee5afb2008-12-07 21:43:36 +05302398 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302399 tidno < WME_NUM_TID;
2400 tidno++, tid++) {
2401 tid->an = an;
2402 tid->tidno = tidno;
2403 tid->seq_start = tid->seq_next = 0;
2404 tid->baw_size = WME_MAX_BA;
2405 tid->baw_head = tid->baw_tail = 0;
2406 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302407 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302408 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002409 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302410 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302411 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302412 tid->state &= ~AGGR_ADDBA_COMPLETE;
2413 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302414 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415
Sujith8ee5afb2008-12-07 21:43:36 +05302416 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302417 acno < WME_NUM_AC; acno++, ac++) {
2418 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002419 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302420 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002421 }
2422}
2423
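/* Unschedule and drain all TIDs of a station that is going away. */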
Sujithb5aa9bf2008-10-29 10:13:31 +05302424void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002425{
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 struct ath_atx_ac *ac;
2427 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002429 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302430
Felix Fietkau2b409942010-07-07 19:42:08 +02002431 for (tidno = 0, tid = &an->tid[tidno];
2432 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433
Felix Fietkau2b409942010-07-07 19:42:08 +02002434 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002435 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436
Felix Fietkau2b409942010-07-07 19:42:08 +02002437 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438
Felix Fietkau2b409942010-07-07 19:42:08 +02002439 if (tid->sched) {
2440 list_del(&tid->list);
2441 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002442 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002443
2444 if (ac->sched) {
2445 list_del(&ac->list);
2446 tid->ac->sched = false;
2447 }
2448
2449 ath_tid_drain(sc, txq, tid);
2450 tid->state &= ~AGGR_ADDBA_COMPLETE;
2451 tid->state &= ~AGGR_CLEANUP;
2452
2453 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002454 }
2455}