/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

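/*
 * 802.11n PHY timing constants used by the airtime math below: the legacy
 * preamble fields (L-STF, L-LTF, L-SIG), the HT-mixed fields (HT-SIG,
 * HT-STF, one HT-LTF per spatial stream), all in microseconds, and the
 * OFDM symbol time of 4 us (3.6 us effective with the short GI).
 */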
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

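/*
 * Data bits carried per OFDM symbol for MCS 0-7 (one spatial stream) at
 * 20 and 40 MHz. Multi-stream rates reuse these entries: the lookup is
 * done with (mcs % 8) and the result multiplied by HT_RC_2_STREAMS(mcs).
 */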
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{ 26, 54 },	/* 0: BPSK */
	{ 52, 108 },	/* 1: QPSK 1/2 */
	{ 78, 162 },	/* 2: QPSK 3/4 */
	{ 104, 216 },	/* 3: 16-QAM 1/2 */
	{ 156, 324 },	/* 4: 16-QAM 3/4 */
	{ 208, 432 },	/* 5: 64-QAM 2/3 */
	{ 234, 486 },	/* 6: 64-QAM 3/4 */
	{ 260, 540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate) ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

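/*
 * Largest frame length (in bytes) that still fits in roughly 4 ms of
 * airtime for each MCS, indexed as [mode][MCS]. Entries are clamped to
 * 65532, just below the 16-bit aggregate length limit of the hardware.
 */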
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

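/*
 * Block-ack window bookkeeping: tid->tx_buf is a bitmap of outstanding
 * frames, indexed relative to tid->seq_start. Completing a frame clears
 * its bit and, when it sat at the head of the window, slides seq_start
 * forward past any frames that have already completed. E.g. with
 * seq_start == 100, completing seqno 100 advances the window up to the
 * next sequence number that is still pending.
 */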
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

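/*
 * Count how many subframes of this (possibly aggregated) buffer were
 * transmitted and how many of them went unacknowledged, based on the
 * block-ack bitmap reported in the tx status.
 */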
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

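/*
 * Tx completion handling for an aggregate: for each subframe, the
 * block-ack bitmap and the software retry limit decide whether the frame
 * is completed back to mac80211 or requeued for retransmission, keeping
 * the block-ack window and the pending queue ordering consistent.
 */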
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter = false;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA issues
			 * happen and the chip then needs to be reset.
			 * The AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine, so only enable the reset in STA mode
			 * for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

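/*
 * Derive the aggregate size limit for this tid from the current rate
 * series: the smallest 4 ms frame length across the selected MCS rates,
 * further clamped by ATH_AMPDU_LIMIT_MAX and the peer's maximum A-MPDU
 * size.  Returns 0 (no aggregation) for rate-probe or legacy rates.
 */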
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if a probe rate was selected, avoid
	 * aggregating this packet at all.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
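/*
 * Informal example of the density padding below: an MPDU delimiter is
 * ATH_AGGR_DELIM_SZ (4) bytes, so if the receiver's MPDU density works
 * out to a 256 byte minimum subframe length and the frame itself is only
 * 200 bytes, (256 - 200) / 4 = 14 extra delimiters are prepended.
 */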
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards.
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes, based
	 * on the highest rate in the rate series (i.e. the first rate), to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

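/*
 * Pull frames off the tid queue and chain them into a single aggregate,
 * stopping when the block-ack window closes, the byte limit from
 * ath_lookup_rate() or the subframe limit is reached, or a rate-probe
 * frame is encountered.
 */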
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
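/*
 * Rough example: a 1500 byte MPDU at MCS 7 (one stream, 20 MHz, long GI)
 * carries 260 data bits per symbol, so (1500 * 8 + OFDM_PLCP_BITS) bits
 * round up to 47 symbols, i.e. 188 us, plus 36 us of legacy/HT preamble.
 */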
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

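/*
 * Program the hardware descriptors for a chain of buffers: rate and flag
 * information common to the chain is computed once, then each buffer's
 * descriptor is filled in and linked to the next, with first/middle/last
 * markers when the chain forms an A-MPDU.
 */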
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

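/*
 * Form and queue aggregates from this tid until the block-ack window
 * closes or enough aggregates (ATH_AGGR_MIN_QDEPTH) are pending in
 * hardware; a single leftover frame is sent on its own rather than as
 * an aggregate.
 */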
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

1103 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301104{
1105 struct ath_atx_tid *txtid;
1106 struct ath_node *an;
1107
1108 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301109 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001110
1111 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1112 return -EAGAIN;
1113
Sujithf83da962009-07-23 15:32:37 +05301114 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001115 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001116 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001117
Felix Fietkau2ed72222011-01-10 17:05:49 -07001118 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1119 txtid->baw_head = txtid->baw_tail = 0;
1120
Felix Fietkau231c3a12010-09-20 19:35:28 +02001121 return 0;
Sujithe8324352009-01-16 21:38:42 +05301122}
1123
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

1255struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1256{
Sujithcbe61d82009-02-09 13:27:12 +05301257 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001258 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301259 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001260 static const int subtype_txq_to_hwq[] = {
1261 [WME_AC_BE] = ATH_TXQ_AC_BE,
1262 [WME_AC_BK] = ATH_TXQ_AC_BK,
1263 [WME_AC_VI] = ATH_TXQ_AC_VI,
1264 [WME_AC_VO] = ATH_TXQ_AC_VO,
1265 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001266 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301267
1268 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001269 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301270 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1271 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1272 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1273 qi.tqi_physCompBuf = 0;
1274
1275 /*
1276 * Enable interrupts only for EOL and DESC conditions.
1277 * We mark tx descriptors to receive a DESC interrupt
1278 * when a tx queue gets deep; otherwise we wait for the
1279 * EOL to reap descriptors. Note that this is done to
1280 * reduce interrupt load and this only defers reaping
1281 * descriptors, never transmitting frames. Aside from
1282 * reducing interrupts this also permits more concurrency.
1283 * The only potential downside is if the tx queue backs
1284 * up, in which case the top half of the kernel may back up
1285 * due to a lack of tx descriptors.
1286 *
1287 * The UAPSD queue is an exception, since we take a desc-
1288 * based intr on the EOSP frames.
1289 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001290 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1291 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1292 TXQ_FLAG_TXERRINT_ENABLE;
1293 } else {
1294 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1295 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1296 else
1297 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1298 TXQ_FLAG_TXDESCINT_ENABLE;
1299 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001300 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1301 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301302 /*
1303 * NB: don't print a message, this happens
1304 * normally on parts with too few tx queues
1305 */
1306 return NULL;
1307 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001308 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001309 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001310 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1311 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301312 return NULL;
1313 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001314 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1315 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301316
Ben Greear60f2d1d2011-01-09 23:11:52 -08001317 txq->axq_qnum = axq_qnum;
1318 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301319 txq->axq_link = NULL;
1320 INIT_LIST_HEAD(&txq->axq_q);
1321 INIT_LIST_HEAD(&txq->axq_acq);
1322 spin_lock_init(&txq->axq_lock);
1323 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001324 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001325 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001326 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001327
1328 txq->txq_headidx = txq->txq_tailidx = 0;
1329 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1330 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301331 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001332 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301333}
1334
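/*
 * Push updated WMM parameters to a hardware queue. Beacon queue
 * parameters are only cached here and applied by ath_beaconq_config.
 */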
Sujithe8324352009-01-16 21:38:42 +05301335int ath_txq_update(struct ath_softc *sc, int qnum,
1336 struct ath9k_tx_queue_info *qinfo)
1337{
Sujithcbe61d82009-02-09 13:27:12 +05301338 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301339 int error = 0;
1340 struct ath9k_tx_queue_info qi;
1341
1342 if (qnum == sc->beacon.beaconq) {
1343 /*
1344 * XXX: for beacon queue, we just save the parameter.
1345 * It will be picked up by ath_beaconq_config when
1346 * it's necessary.
1347 */
1348 sc->beacon.beacon_qi = *qinfo;
1349 return 0;
1350 }
1351
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001352 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301353
1354 ath9k_hw_get_txq_props(ah, qnum, &qi);
1355 qi.tqi_aifs = qinfo->tqi_aifs;
1356 qi.tqi_cwmin = qinfo->tqi_cwmin;
1357 qi.tqi_cwmax = qinfo->tqi_cwmax;
1358 qi.tqi_burstTime = qinfo->tqi_burstTime;
1359 qi.tqi_readyTime = qinfo->tqi_readyTime;
1360
1361 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001362 ath_err(ath9k_hw_common(sc->sc_ah),
1363 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301364 error = -EIO;
1365 } else {
1366 ath9k_hw_resettxqueue(ah, qnum);
1367 }
1368
1369 return error;
1370}
1371
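/*
 * Recompute the CAB queue ready time as a (bounded) percentage of the
 * beacon interval.
 */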
1372int ath_cabq_update(struct ath_softc *sc)
1373{
1374 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001375 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301376 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301377
1378 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1379 /*
1380 * Ensure the readytime % is within the bounds.
1381 */
Sujith17d79042009-02-09 13:27:03 +05301382 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1383 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1384 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1385 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301386
Steve Brown9814f6b2011-02-07 17:10:39 -07001387 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301388 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301389 ath_txq_update(sc, qnum, &qi);
1390
1391 return 0;
1392}
1393
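/*
 * Rate-control probe frames are excluded so that they do not count
 * towards the aggregate queue depth (axq_ampdu_depth).
 */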
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001394static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1395{
1396 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1397 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1398}
1399
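/*
 * Complete or free every buffer on the given list. The queue lock is
 * dropped around the completion handlers, hence the sparse annotations.
 */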
Felix Fietkaufce041b2011-05-19 12:20:25 +02001400static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1401 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301402 __releases(txq->axq_lock)
1403 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301404{
1405 struct ath_buf *bf, *lastbf;
1406 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001407 struct ath_tx_status ts;
1408
1409 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301410 INIT_LIST_HEAD(&bf_head);
1411
Felix Fietkaufce041b2011-05-19 12:20:25 +02001412 while (!list_empty(list)) {
1413 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301414
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 if (bf->bf_stale) {
1416 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301417
Felix Fietkaufce041b2011-05-19 12:20:25 +02001418 ath_tx_return_buffer(sc, bf);
1419 continue;
Sujithe8324352009-01-16 21:38:42 +05301420 }
1421
1422 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001423 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001424
Sujithe8324352009-01-16 21:38:42 +05301425 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001426 if (bf_is_ampdu_not_probing(bf))
1427 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301428
Felix Fietkaufce041b2011-05-19 12:20:25 +02001429 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301430 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001431 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1432 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301433 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001434 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001435 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001436 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001437}
1438
1439/*
1440 * Drain a given TX queue (could be Beacon or Data)
1441 *
1442 * This assumes output has been stopped and
1443 * we do not need to block ath_tx_tasklet.
1444 */
1445void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1446{
1447 spin_lock_bh(&txq->axq_lock);
1448 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1449 int idx = txq->txq_tailidx;
1450
1451 while (!list_empty(&txq->txq_fifo[idx])) {
1452 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1453 retry_tx);
1454
1455 INCR(idx, ATH_TXFIFO_DEPTH);
1456 }
1457 txq->txq_tailidx = idx;
1458 }
1459
1460 txq->axq_link = NULL;
1461 txq->axq_tx_inprogress = false;
1462 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001463
1464 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001465 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1466 ath_txq_drain_pending_buffers(sc, txq);
1467
1468 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301469}
1470
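/*
 * Abort TX DMA and drain every configured queue. Returns true if the
 * hardware stopped cleanly, i.e. no descriptors were left pending.
 */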
Felix Fietkau080e1a22010-12-05 20:17:53 +01001471bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301472{
Sujithcbe61d82009-02-09 13:27:12 +05301473 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001474 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301475 struct ath_txq *txq;
1476 int i, npend = 0;
1477
1478 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001479 return true;
Sujith043a0402009-01-16 21:38:47 +05301480
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001481 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301482
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001483 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301484 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001485 if (!ATH_TXQ_SETUP(sc, i))
1486 continue;
1487
1488 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301489 }
1490
Felix Fietkau080e1a22010-12-05 20:17:53 +01001491 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001492 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301493
1494 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001495 if (!ATH_TXQ_SETUP(sc, i))
1496 continue;
1497
1498 /*
1499 * The caller will resume queues with ieee80211_wake_queues.
1500 * Mark the queue as not stopped to prevent ath_tx_complete
1501 * from waking the queue too early.
1502 */
1503 txq = &sc->tx.txq[i];
1504 txq->stopped = false;
1505 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301506 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001507
1508 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301509}
1510
Sujithe8324352009-01-16 21:38:42 +05301511void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1512{
1513 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1514 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1515}
1516
Ben Greear7755bad2011-01-18 17:30:00 -08001517/* For each axq_acq entry, for each tid, try to schedule packets
1518 * for transmission until axq_ampdu_depth has reached the minimum queue depth.
1519 */
Sujithe8324352009-01-16 21:38:42 +05301520void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1521{
Ben Greear7755bad2011-01-18 17:30:00 -08001522 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1523 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301524
Felix Fietkau236de512011-09-03 01:40:25 +02001525 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001526 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301527 return;
1528
1529 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001530 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301531
Ben Greear7755bad2011-01-18 17:30:00 -08001532 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1533 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1534 list_del(&ac->list);
1535 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301536
Ben Greear7755bad2011-01-18 17:30:00 -08001537 while (!list_empty(&ac->tid_q)) {
1538 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1539 list);
1540 list_del(&tid->list);
1541 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301542
Ben Greear7755bad2011-01-18 17:30:00 -08001543 if (tid->paused)
1544 continue;
Sujithe8324352009-01-16 21:38:42 +05301545
Ben Greear7755bad2011-01-18 17:30:00 -08001546 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301547
Ben Greear7755bad2011-01-18 17:30:00 -08001548 /*
1549 * add tid to round-robin queue if more frames
1550 * are pending for the tid
1551 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001552 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001553 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301554
Ben Greear7755bad2011-01-18 17:30:00 -08001555 if (tid == last_tid ||
1556 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1557 break;
Sujithe8324352009-01-16 21:38:42 +05301558 }
Ben Greear7755bad2011-01-18 17:30:00 -08001559
1560 if (!list_empty(&ac->tid_q)) {
1561 if (!ac->sched) {
1562 ac->sched = true;
1563 list_add_tail(&ac->list, &txq->axq_acq);
1564 }
1565 }
1566
1567 if (ac == last_ac ||
1568 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1569 return;
Sujithe8324352009-01-16 21:38:42 +05301570 }
1571}
1572
Sujithe8324352009-01-16 21:38:42 +05301573/***********/
1574/* TX, DMA */
1575/***********/
1576
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001578 * Insert a chain of ath_buf (descriptors) on a txq and
1579 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580 */
Sujith102e0572008-10-29 10:15:16 +05301581static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001582 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583{
Sujithcbe61d82009-02-09 13:27:12 +05301584 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001585 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001586 struct ath_buf *bf, *bf_last;
1587 bool puttxbuf = false;
1588 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301589
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001590 /*
1591 * Insert the frame on the outbound list and
1592 * pass it on to the hardware.
1593 */
1594
1595 if (list_empty(head))
1596 return;
1597
Felix Fietkaufce041b2011-05-19 12:20:25 +02001598 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001599 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001600 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601
Joe Perches226afe62010-12-02 19:12:37 -08001602 ath_dbg(common, ATH_DBG_QUEUE,
1603 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604
Felix Fietkaufce041b2011-05-19 12:20:25 +02001605 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1606 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001607 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001608 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001609 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001610 list_splice_tail_init(head, &txq->axq_q);
1611
Felix Fietkaufce041b2011-05-19 12:20:25 +02001612 if (txq->axq_link) {
1613 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001614 ath_dbg(common, ATH_DBG_XMIT,
1615 "link[%u] (%p)=%llx (%p)\n",
1616 txq->axq_qnum, txq->axq_link,
1617 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001618 } else if (!edma)
1619 puttxbuf = true;
1620
1621 txq->axq_link = bf_last->bf_desc;
1622 }
1623
1624 if (puttxbuf) {
1625 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1626 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1627 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1628 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1629 }
1630
1631 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001632 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001633 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001634 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001635
1636 if (!internal) {
1637 txq->axq_depth++;
1638 if (bf_is_ampdu_not_probing(bf))
1639 txq->axq_ampdu_depth++;
1640 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001641}
1642
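/*
 * Queue a frame that belongs to an aggregation session: either defer it
 * to the TID's software queue, or, when nothing blocks it, send it
 * directly as a single-frame "aggregate" that is still tracked in the
 * block-ack window.
 */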
Sujithe8324352009-01-16 21:38:42 +05301643static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001644 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301645{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001646 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001647 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001648 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301649
1650 /*
1651 * Do not queue to h/w when any of the following conditions is true:
1652 * - there are pending frames in software queue
1653 * - the TID is currently paused for ADDBA/BAR request
1654 * - seqno is not within block-ack window
1655 * - h/w queue depth exceeds low water mark
1656 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001657 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001658 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001659 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001660 /*
Sujithe8324352009-01-16 21:38:42 +05301661 * Add this frame to the software queue so it can be
 1662 * scheduled later as part of an aggregate.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001663 */
Ben Greearbda8add2011-01-09 23:11:48 -08001664 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001665 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001666 if (!txctl->an || !txctl->an->sleeping)
1667 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301668 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001669 }
1670
Felix Fietkau44f1d262011-08-28 00:32:25 +02001671 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1672 if (!bf)
1673 return;
1674
Felix Fietkau399c6482011-09-14 21:24:17 +02001675 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001676 INIT_LIST_HEAD(&bf_head);
1677 list_add(&bf->list, &bf_head);
1678
Sujithe8324352009-01-16 21:38:42 +05301679 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001680 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301681
1682 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001683 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301684 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001685 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001686 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301687}
1688
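/*
 * Send a single non-aggregate frame to the hardware, advancing the
 * TID's starting sequence number when a TID is associated.
 */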
Felix Fietkau82b873a2010-11-11 03:18:37 +01001689static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001690 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001691{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001692 struct ath_frame_info *fi = get_frame_info(skb);
1693 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301694 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001695
Felix Fietkau44f1d262011-08-28 00:32:25 +02001696 bf = fi->bf;
1697 if (!bf)
1698 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1699
1700 if (!bf)
1701 return;
1702
1703 INIT_LIST_HEAD(&bf_head);
1704 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001705 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301706
1707 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001708 if (tid)
1709 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301710
Sujithd43f30152009-01-16 21:38:53 +05301711 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001712 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001713 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301714 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001715}
1716
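/*
 * Cache per-frame data (key index, key type, frame length) in the
 * ath_frame_info attached to the skb for use when the descriptor is built.
 */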
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001717static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1718 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301719{
1720 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001721 struct ieee80211_sta *sta = tx_info->control.sta;
1722 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001723 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001725 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001726 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301727
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001728 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301729
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001730 if (sta)
1731 an = (struct ath_node *) sta->drv_priv;
1732
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001733 memset(fi, 0, sizeof(*fi));
1734 if (hw_key)
1735 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001736 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1737 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001738 else
1739 fi->keyix = ATH9K_TXKEYIX_INVALID;
1740 fi->keytype = keytype;
1741 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301742}
1743
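/*
 * On hardware with the APM capability, reduce the chainmask from 0x7 to
 * 0x3 for lower rate codes (< 0x90) on 5 GHz channels; otherwise keep
 * the configured chainmask.
 */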
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301744u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1745{
1746 struct ath_hw *ah = sc->sc_ah;
1747 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301748 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1749 (curchan->channelFlags & CHANNEL_5GHZ) &&
1750 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301751 return 0x3;
1752 else
1753 return chainmask;
1754}
1755
Felix Fietkau44f1d262011-08-28 00:32:25 +02001756/*
1757 * Assign a descriptor (and a sequence number if necessary)
 1758 * and map the buffer for DMA. Frees the skb on error.
1759 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001760static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001761 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001762 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001763 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301764{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001765 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001766 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001768 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001769 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001770
1771 bf = ath_tx_get_buffer(sc);
1772 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001773 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001774 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001775 }
Sujithe8324352009-01-16 21:38:42 +05301776
Sujithe8324352009-01-16 21:38:42 +05301777 ATH_TXBUF_RESET(bf);
1778
Felix Fietkaufa05f872011-08-28 00:32:24 +02001779 if (tid) {
1780 seqno = tid->seq_next;
1781 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1782 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1783 bf->bf_state.seqno = seqno;
1784 }
1785
Sujithe8324352009-01-16 21:38:42 +05301786 bf->bf_mpdu = skb;
1787
Ben Greearc1739eb32010-10-14 12:45:29 -07001788 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1789 skb->len, DMA_TO_DEVICE);
1790 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301791 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001792 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001793 ath_err(ath9k_hw_common(sc->sc_ah),
1794 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001795 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001796 goto error;
Sujithe8324352009-01-16 21:38:42 +05301797 }
1798
Felix Fietkau56dc6332011-08-28 00:32:22 +02001799 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001800
1801 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001802
1803error:
1804 dev_kfree_skb_any(skb);
1805 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001806}
1807
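/*
 * Decide how a frame reaches the hardware: frames that mac80211 marked
 * for A-MPDU and that map to a known TID go through the aggregation
 * path, everything else is sent out directly.
 */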
1808/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001809static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001810 struct ath_tx_control *txctl)
1811{
Felix Fietkau04caf862010-11-14 15:20:12 +01001812 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1813 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001814 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001815 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001816 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301817
Sujithe8324352009-01-16 21:38:42 +05301818 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301819 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1820 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001821 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1822 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001823 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001824
Felix Fietkau066dae92010-11-07 14:59:39 +01001825 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001826 }
1827
1828 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001829 /*
1830 * Try aggregation if it's a unicast data frame
1831 * and the destination is HT capable.
1832 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001833 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301834 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001835 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1836 if (!bf)
1837 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001838
Felix Fietkau82b873a2010-11-11 03:18:37 +01001839 bf->bf_state.bfs_paprd = txctl->paprd;
1840
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301841 if (txctl->paprd)
1842 bf->bf_state.bfs_paprd_timestamp = jiffies;
1843
Felix Fietkau44f1d262011-08-28 00:32:25 +02001844 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301845 }
1846
Felix Fietkaufa05f872011-08-28 00:32:24 +02001847out:
Sujithe8324352009-01-16 21:38:42 +05301848 spin_unlock_bh(&txctl->txq->axq_lock);
1849}
1850
1851/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001852int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301853 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001854{
Felix Fietkau28d16702010-11-14 15:20:10 +01001855 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1856 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001857 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001858 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001859 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001860 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001861 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001862 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001863 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001864
Ben Greeara9927ba2010-12-06 21:13:49 -08001865 /* NOTE: sta can be NULL according to net/mac80211.h */
1866 if (sta)
1867 txctl->an = (struct ath_node *)sta->drv_priv;
1868
Felix Fietkau04caf862010-11-14 15:20:12 +01001869 if (info->control.hw_key)
1870 frmlen += info->control.hw_key->icv_len;
1871
Felix Fietkau28d16702010-11-14 15:20:10 +01001872 /*
1873 * As a temporary workaround, assign seq# here; this will likely need
1874 * to be cleaned up to work better with Beacon transmission and virtual
1875 * BSSes.
1876 */
1877 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1878 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1879 sc->tx.seq_no += 0x10;
1880 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1881 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1882 }
1883
John W. Linville42cecc32011-09-19 15:42:31 -04001884 /* Add the padding after the header if this is not already done */
1885 padpos = ath9k_cmn_padpos(hdr->frame_control);
1886 padsize = padpos & 3;
1887 if (padsize && skb->len > padpos) {
1888 if (skb_headroom(skb) < padsize)
1889 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001890
John W. Linville42cecc32011-09-19 15:42:31 -04001891 skb_push(skb, padsize);
1892 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001893 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001894 }
1895
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001896 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1897 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1898 !ieee80211_is_data(hdr->frame_control))
1899 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1900
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001901 setup_frame_info(hw, skb, frmlen);
1902
1903 /*
1904 * At this point, the vif, hw_key and sta pointers in the tx control
1905 * info are no longer valid (overwritten by the ath_frame_info data).
1906 */
1907
Felix Fietkau066dae92010-11-07 14:59:39 +01001908 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001909 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001910 if (txq == sc->tx.txq_map[q] &&
1911 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001912 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001913 txq->stopped = 1;
1914 }
1915 spin_unlock_bh(&txq->axq_lock);
1916
Felix Fietkau44f1d262011-08-28 00:32:25 +02001917 ath_tx_start_dma(sc, skb, txctl);
1918 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001919}
1920
Sujithe8324352009-01-16 21:38:42 +05301921/*****************/
1922/* TX Completion */
1923/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001924
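/*
 * Final per-frame completion: remove the MAC header padding again, undo
 * a pending TX-ack power-save wait, wake the mac80211 queue if it was
 * stopped, and hand the TX status back to mac80211.
 */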
Sujithe8324352009-01-16 21:38:42 +05301925static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301926 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001927{
Sujithe8324352009-01-16 21:38:42 +05301928 struct ieee80211_hw *hw = sc->hw;
1929 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001930 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001931 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001932 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301933
Joe Perches226afe62010-12-02 19:12:37 -08001934 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301935
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301936 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301937 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301938
Felix Fietkau55797b12011-09-14 21:24:16 +02001939 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301940 /* Frame was ACKed */
1941 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301942
John W. Linville42cecc32011-09-19 15:42:31 -04001943 padpos = ath9k_cmn_padpos(hdr->frame_control);
1944 padsize = padpos & 3;
1945 if (padsize && skb->len>padpos+padsize) {
1946 /*
1947 * Remove MAC header padding before giving the frame back to
1948 * mac80211.
1949 */
1950 memmove(skb->data + padsize, skb->data, padpos);
1951 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301952 }
1953
Sujith1b04b932010-01-08 10:36:05 +05301954 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1955 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001956 ath_dbg(common, ATH_DBG_PS,
1957 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301958 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1959 PS_WAIT_FOR_CAB |
1960 PS_WAIT_FOR_PSPOLL_DATA |
1961 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001962 }
1963
Felix Fietkau7545daf2011-01-24 19:23:16 +01001964 q = skb_get_queue_mapping(skb);
1965 if (txq == sc->tx.txq_map[q]) {
1966 spin_lock_bh(&txq->axq_lock);
1967 if (WARN_ON(--txq->pending_frames < 0))
1968 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001969
Felix Fietkau7545daf2011-01-24 19:23:16 +01001970 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1971 ieee80211_wake_queue(sc->hw, q);
1972 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001973 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001974 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001975 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001976
1977 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301978}
1979
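/*
 * Unmap the frame's DMA buffer, complete the skb (PAPRD test frames are
 * handled separately) and return the ath_buf list to the free pool.
 */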
1980static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001981 struct ath_txq *txq, struct list_head *bf_q,
1982 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301983{
1984 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001985 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301986 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301987 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301988
Sujithe8324352009-01-16 21:38:42 +05301989 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301990 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301991
Felix Fietkau55797b12011-09-14 21:24:16 +02001992 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301993 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301994
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001995 if (ts->ts_status & ATH9K_TXERR_FILT)
1996 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1997
Ben Greearc1739eb32010-10-14 12:45:29 -07001998 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001999 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002000
2001 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302002 if (time_after(jiffies,
2003 bf->bf_state.bfs_paprd_timestamp +
2004 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002005 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002006 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002007 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002008 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002009 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302010 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002011 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002012 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2013 * accidentally reference it later.
2014 */
2015 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302016
2017 /*
2018 * Return the list of ath_buf of this mpdu to free queue
2019 */
2020 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2021 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2022 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2023}
2024
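/*
 * Translate the hardware TX status into mac80211 rate-control feedback:
 * ACK RSSI, A-MPDU subframe counts and per-rate retry counts.
 */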
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002025static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2026 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002027 int txok)
Sujithc4288392008-11-18 09:09:30 +05302028{
Sujitha22be222009-03-30 15:28:36 +05302029 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302030 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302031 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002032 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002033 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302034 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302035
Sujith95e4acb2009-03-13 08:56:09 +05302036 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002037 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302038
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002039 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302040 WARN_ON(tx_rateindex >= hw->max_rates);
2041
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002042 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002043 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302044
Felix Fietkaub572d032010-11-14 15:20:07 +01002045 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002046 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302047 tx_info->status.ampdu_len = nframes;
2048 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002049
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002050 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002051 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002052 /*
2053 * If an underrun error is seen, treat it as an excessive
2054 * retry only if max frame trigger level has been reached
2055 * (2 KB for single stream, and 4 KB for dual stream).
2056 * Adjust the long retry as if the frame was tried
2057 * hw->max_rate_tries times to affect how rate control updates
2058 * PER for the failed rate.
2059 * In case of congestion on the bus penalizing this type of
2060 * underruns should help hardware actually transmit new frames
2061 * successfully by eventually preferring slower rates.
2062 * This itself should also alleviate congestion on the bus.
2063 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002064 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2065 ATH9K_TX_DELIM_UNDERRUN)) &&
2066 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002067 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002068 tx_info->status.rates[tx_rateindex].count =
2069 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302070 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302071
Felix Fietkau545750d2009-11-23 22:21:01 +01002072 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302073 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002074 tx_info->status.rates[i].idx = -1;
2075 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302076
Felix Fietkau78c46532010-06-25 01:26:16 +02002077 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302078}
2079
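/*
 * Common completion step for one unit of work (a single frame or an
 * aggregate): update the queue depth counters, complete the buffer with
 * the queue lock dropped, then reschedule the queue.
 */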
Felix Fietkaufce041b2011-05-19 12:20:25 +02002080static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2081 struct ath_tx_status *ts, struct ath_buf *bf,
2082 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302083 __releases(txq->axq_lock)
2084 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002085{
2086 int txok;
2087
2088 txq->axq_depth--;
2089 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2090 txq->axq_tx_inprogress = false;
2091 if (bf_is_ampdu_not_probing(bf))
2092 txq->axq_ampdu_depth--;
2093
2094 spin_unlock_bh(&txq->axq_lock);
2095
2096 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002097 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002098 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2099 } else
2100 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2101
2102 spin_lock_bh(&txq->axq_lock);
2103
2104 if (sc->sc_flags & SC_OP_TXAGGR)
2105 ath_txq_schedule(sc, txq);
2106}
2107
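/*
 * Reap completed descriptors from a legacy (non-EDMA) queue, keeping
 * the last completed descriptor around as the holding descriptor that
 * the hardware may still point at.
 */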
Sujithc4288392008-11-18 09:09:30 +05302108static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109{
Sujithcbe61d82009-02-09 13:27:12 +05302110 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002111 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2113 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302114 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002115 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 int status;
2117
Joe Perches226afe62010-12-02 19:12:37 -08002118 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2119 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2120 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121
Felix Fietkaufce041b2011-05-19 12:20:25 +02002122 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002124 if (work_pending(&sc->hw_reset_work))
2125 break;
2126
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 if (list_empty(&txq->axq_q)) {
2128 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002129 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002130 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 break;
2132 }
2133 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2134
2135 /*
2136 * There is a race condition that a BH gets scheduled
2137 * after sw writes TxE and before hw re-loads the last
2138 * descriptor to get the newly chained one.
2139 * Software must keep the last DONE descriptor as a
2140 * holding descriptor - software does so by marking
2141 * it with the STALE flag.
2142 */
2143 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302144 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002146 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002148
2149 bf = list_entry(bf_held->list.next, struct ath_buf,
2150 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 }
2152
2153 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302154 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155
Felix Fietkau29bffa92010-03-29 20:14:23 -07002156 memset(&ts, 0, sizeof(ts));
2157 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002158 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002160
Ben Greear2dac4fb2011-01-09 23:11:45 -08002161 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162
2163 /*
2164 * Remove the ath_bufs of the same transmit unit from txq,
 2165 * but leave the last descriptor behind as the holding
2166 * descriptor for hw.
2167 */
Sujitha119cc42009-03-30 15:28:38 +05302168 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 if (!list_is_singular(&lastbf->list))
2171 list_cut_position(&bf_head,
2172 &txq->axq_q, lastbf->list.prev);
2173
Felix Fietkaufce041b2011-05-19 12:20:25 +02002174 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002175 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002176 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 }
Johannes Berge6a98542008-10-21 12:40:02 +02002178
Felix Fietkaufce041b2011-05-19 12:20:25 +02002179 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002181 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182}
2183
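/*
 * TX watchdog: if a queue still has descriptors outstanding and has
 * made no progress since the previous poll, assume the hardware hung
 * and schedule a chip reset.
 */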
Sujith305fe472009-07-23 15:32:29 +05302184static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002185{
2186 struct ath_softc *sc = container_of(work, struct ath_softc,
2187 tx_complete_work.work);
2188 struct ath_txq *txq;
2189 int i;
2190 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002191#ifdef CONFIG_ATH9K_DEBUGFS
2192 sc->tx_complete_poll_work_seen++;
2193#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002194
2195 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2196 if (ATH_TXQ_SETUP(sc, i)) {
2197 txq = &sc->tx.txq[i];
2198 spin_lock_bh(&txq->axq_lock);
2199 if (txq->axq_depth) {
2200 if (txq->axq_tx_inprogress) {
2201 needreset = true;
2202 spin_unlock_bh(&txq->axq_lock);
2203 break;
2204 } else {
2205 txq->axq_tx_inprogress = true;
2206 }
2207 }
2208 spin_unlock_bh(&txq->axq_lock);
2209 }
2210
2211 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002212 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2213 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002214 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002215 }
2216
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002217 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002218 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2219}
2220
2221
Sujithe8324352009-01-16 21:38:42 +05302222
2223void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002224{
Sujithe8324352009-01-16 21:38:42 +05302225 int i;
2226 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002227
Sujithe8324352009-01-16 21:38:42 +05302228 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002229
2230 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302231 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2232 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002233 }
2234}
2235
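/*
 * Completion path for EDMA hardware: the TX status ring identifies the
 * queue, frames are taken from its oldest FIFO slot (txq_tailidx), and
 * once a slot drains any frames chained on axq_q are pushed into it.
 */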
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236void ath_tx_edma_tasklet(struct ath_softc *sc)
2237{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002238 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002239 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2240 struct ath_hw *ah = sc->sc_ah;
2241 struct ath_txq *txq;
2242 struct ath_buf *bf, *lastbf;
2243 struct list_head bf_head;
2244 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002245
2246 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002247 if (work_pending(&sc->hw_reset_work))
2248 break;
2249
Felix Fietkaufce041b2011-05-19 12:20:25 +02002250 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 if (status == -EINPROGRESS)
2252 break;
2253 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002254 ath_dbg(common, ATH_DBG_XMIT,
2255 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 break;
2257 }
2258
2259 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002260 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261 continue;
2262
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264
2265 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002266
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002267 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2268 spin_unlock_bh(&txq->axq_lock);
2269 return;
2270 }
2271
2272 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2273 struct ath_buf, list);
2274 lastbf = bf->bf_lastbf;
2275
2276 INIT_LIST_HEAD(&bf_head);
2277 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2278 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2281 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282
Felix Fietkaufce041b2011-05-19 12:20:25 +02002283 if (!list_empty(&txq->axq_q)) {
2284 struct list_head bf_q;
2285
2286 INIT_LIST_HEAD(&bf_q);
2287 txq->axq_link = NULL;
2288 list_splice_tail_init(&txq->axq_q, &bf_q);
2289 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2290 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291 }
2292
Felix Fietkaufce041b2011-05-19 12:20:25 +02002293 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294 spin_unlock_bh(&txq->axq_lock);
2295 }
2296}
2297
Sujithe8324352009-01-16 21:38:42 +05302298/*****************/
2299/* Init, Cleanup */
2300/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002301
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002302static int ath_txstatus_setup(struct ath_softc *sc, int size)
2303{
2304 struct ath_descdma *dd = &sc->txsdma;
2305 u8 txs_len = sc->sc_ah->caps.txs_len;
2306
2307 dd->dd_desc_len = size * txs_len;
2308 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2309 &dd->dd_desc_paddr, GFP_KERNEL);
2310 if (!dd->dd_desc)
2311 return -ENOMEM;
2312
2313 return 0;
2314}
2315
2316static int ath_tx_edma_init(struct ath_softc *sc)
2317{
2318 int err;
2319
2320 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2321 if (!err)
2322 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2323 sc->txsdma.dd_desc_paddr,
2324 ATH_TXSTATUS_RING_SIZE);
2325
2326 return err;
2327}
2328
2329static void ath_tx_edma_cleanup(struct ath_softc *sc)
2330{
2331 struct ath_descdma *dd = &sc->txsdma;
2332
2333 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2334 dd->dd_desc_paddr);
2335}
2336
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337int ath_tx_init(struct ath_softc *sc, int nbufs)
2338{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002339 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340 int error = 0;
2341
Sujith797fe5cb2009-03-30 15:28:45 +05302342 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343
Sujith797fe5cb2009-03-30 15:28:45 +05302344 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002345 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302346 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002347 ath_err(common,
2348 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302349 goto err;
2350 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351
Sujith797fe5cb2009-03-30 15:28:45 +05302352 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002353 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002355 ath_err(common,
2356 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302357 goto err;
2358 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002360 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2361
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002362 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2363 error = ath_tx_edma_init(sc);
2364 if (error)
2365 goto err;
2366 }
2367
Sujith797fe5cb2009-03-30 15:28:45 +05302368err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369 if (error != 0)
2370 ath_tx_cleanup(sc);
2371
2372 return error;
2373}
2374
Sujith797fe5cb2009-03-30 15:28:45 +05302375void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376{
Sujithb77f4832008-12-07 21:44:03 +05302377 if (sc->beacon.bdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujithb77f4832008-12-07 21:44:03 +05302380 if (sc->tx.txdma.dd_desc_len != 0)
2381 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002382
2383 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2384 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385}
2386
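/*
 * Initialize the per-station TID and AC state used for aggregation
 * scheduling.
 */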
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2388{
Sujithc5170162008-10-29 10:13:59 +05302389 struct ath_atx_tid *tid;
2390 struct ath_atx_ac *ac;
2391 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Sujith8ee5afb2008-12-07 21:43:36 +05302393 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302394 tidno < WME_NUM_TID;
2395 tidno++, tid++) {
2396 tid->an = an;
2397 tid->tidno = tidno;
2398 tid->seq_start = tid->seq_next = 0;
2399 tid->baw_size = WME_MAX_BA;
2400 tid->baw_head = tid->baw_tail = 0;
2401 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302402 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302403 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002404 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302405 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302406 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302407 tid->state &= ~AGGR_ADDBA_COMPLETE;
2408 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302409 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410
Sujith8ee5afb2008-12-07 21:43:36 +05302411 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302412 acno < WME_NUM_AC; acno++, ac++) {
2413 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002414 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302415 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002416 }
2417}
2418
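/*
 * A station is going away: unschedule all of its TIDs and drain any
 * frames still queued in software.
 */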
Sujithb5aa9bf2008-10-29 10:13:31 +05302419void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420{
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 struct ath_atx_ac *ac;
2422 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002424 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302425
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 for (tidno = 0, tid = &an->tid[tidno];
2427 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002430 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431
Felix Fietkau2b409942010-07-07 19:42:08 +02002432 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433
Felix Fietkau2b409942010-07-07 19:42:08 +02002434 if (tid->sched) {
2435 list_del(&tid->list);
2436 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002438
2439 if (ac->sched) {
2440 list_del(&ac->list);
2441 tid->ac->sched = false;
2442 }
2443
2444 ath_tid_drain(sc, txq, tid);
2445 tid->state &= ~AGGR_ADDBA_COMPLETE;
2446 tid->state &= ~AGGR_CLEANUP;
2447
2448 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002449 }
2450}