/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
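
/*
 * Example of the symbol-time arithmetic above: with the regular 800 ns
 * guard interval an HT OFDM symbol lasts 4 us, so SYMBOL_TIME(10) = 40 us
 * and NUM_SYMBOLS_PER_USEC(8) = 2.  With the 400 ns short GI a symbol
 * lasts 3.6 us; SYMBOL_TIME_HALFGI() and NUM_SYMBOLS_PER_USEC_HALFGI()
 * express the same conversions with the ratio 18/5 (= 3.6) in integer
 * math, e.g. NUM_SYMBOLS_PER_USEC_HALFGI(8) = ((8 * 5) - 4) / 18 = 2.
 */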

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
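
/*
 * A rate index is looked up as bits_per_symbol[rix % 8][width] and then
 * multiplied by the stream count from HT_RC_2_STREAMS().  For example,
 * MCS 12 (16-QAM 3/4, two streams) on a 40 MHz channel gives
 * bits_per_symbol[4][1] * 2 = 324 * 2 = 648 bits per 4 us symbol,
 * i.e. 162 Mbps with the long guard interval.
 */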

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
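
/*
 * Each entry above is roughly the largest frame, in bytes, that fits in a
 * 4 ms transmit window at the corresponding MCS index, capped at 65532 so
 * it still fits in a 16-bit length field.  ath_lookup_rate() below takes
 * the minimum of these values across the rate series to bound the size of
 * an A-MPDU.
 */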

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
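
/*
 * The two helpers above maintain the software block-ack window: tx_buf is a
 * bitmap of up to ATH_TID_MAX_BUFS outstanding subframes, baw_head/baw_tail
 * bound the in-flight range, and ATH_BA_INDEX() (the sequence-number
 * distance from tid->seq_start, modulo the 12-bit sequence space) selects
 * the bit for a given frame.  When the oldest outstanding frame completes,
 * ath_tx_update_baw() slides seq_start forward past every contiguous
 * completed entry.
 */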

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
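
/*
 * A worked example of the density calculation above: with an MPDU density
 * of 8 us and a first rate of MCS 7 at 40 MHz with short GI, nsymbols =
 * ((8 * 5) - 4) / 18 = 2 and nsymbits = 540, so minlen = (2 * 540) / 8 =
 * 135 bytes.  A 60-byte subframe would then need roughly (135 - 60) / 4 =
 * 18 extra delimiters (assuming the usual 4-byte ATH_AGGR_DELIM_SZ), and
 * ndelim is raised accordingly.
 */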

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
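
/*
 * For example, a 1500-byte MPDU at MCS 0 on a 20 MHz channel with the long
 * guard interval: nbits = 1500 * 8 + 22 = 12022, nsymbits = 26, so
 * nsymbols = 463 and the data portion lasts 463 * 4 = 1852 us; adding the
 * 36 us of legacy and HT training/signal fields gives 1888 us.
 */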

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}
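
/*
 * ath_tx_fill_desc() walks the bf_next chain once and programs one hardware
 * descriptor per subframe: the rate series and global flags are computed
 * once from the first buffer, while per-subframe fields (buffer address,
 * length, key, delimiter count and the AGGR_BUF_FIRST/MIDDLE/LAST position)
 * are filled in on each iteration before ath9k_hw_set_txdesc() writes the
 * descriptor.
 */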

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
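
/*
 * ath_tx_aggr_start() is the driver half of the ADDBA handshake; it is
 * reached from the mac80211 ampdu_action callback.  It seeds *ssn (the
 * starting sequence number advertised in the ADDBA request) from seq_next,
 * clears the block-ack window and leaves the TID paused until
 * ath_tx_aggr_resume() marks the ADDBA exchange complete.
 */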

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

Sujithe8324352009-01-16 21:38:42 +05301234/********************/
1235/* Queue Management */
1236/********************/
1237
Sujithe8324352009-01-16 21:38:42 +05301238static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1239 struct ath_txq *txq)
1240{
1241 struct ath_atx_ac *ac, *ac_tmp;
1242 struct ath_atx_tid *tid, *tid_tmp;
1243
1244 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1245 list_del(&ac->list);
1246 ac->sched = false;
1247 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1248 list_del(&tid->list);
1249 tid->sched = false;
1250 ath_tid_drain(sc, txq, tid);
1251 }
1252 }
1253}
1254
1255struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1256{
Sujithcbe61d82009-02-09 13:27:12 +05301257 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301258 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001259 static const int subtype_txq_to_hwq[] = {
1260 [WME_AC_BE] = ATH_TXQ_AC_BE,
1261 [WME_AC_BK] = ATH_TXQ_AC_BK,
1262 [WME_AC_VI] = ATH_TXQ_AC_VI,
1263 [WME_AC_VO] = ATH_TXQ_AC_VO,
1264 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001265 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301266
1267 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001268 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301269 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1270 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1271 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1272 qi.tqi_physCompBuf = 0;
1273
1274 /*
1275 * Enable interrupts only for EOL and DESC conditions.
1276 * We mark tx descriptors to receive a DESC interrupt
 1277	 * when a tx queue gets deep; otherwise we wait for the
 1278	 * EOL interrupt to reap descriptors. Note that this is done to
1279 * reduce interrupt load and this only defers reaping
1280 * descriptors, never transmitting frames. Aside from
1281 * reducing interrupts this also permits more concurrency.
1282 * The only potential downside is if the tx queue backs
 1283	 * up, in which case the top half of the kernel may back up
1284 * due to a lack of tx descriptors.
1285 *
1286 * The UAPSD queue is an exception, since we take a desc-
1287 * based intr on the EOSP frames.
1288 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001289 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1290 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1291 TXQ_FLAG_TXERRINT_ENABLE;
1292 } else {
1293 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1294 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1295 else
1296 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1297 TXQ_FLAG_TXDESCINT_ENABLE;
1298 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001299 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1300 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301301 /*
1302 * NB: don't print a message, this happens
1303 * normally on parts with too few tx queues
1304 */
1305 return NULL;
1306 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001307 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1308 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301309
Ben Greear60f2d1d2011-01-09 23:11:52 -08001310 txq->axq_qnum = axq_qnum;
1311 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301312 txq->axq_link = NULL;
1313 INIT_LIST_HEAD(&txq->axq_q);
1314 INIT_LIST_HEAD(&txq->axq_acq);
1315 spin_lock_init(&txq->axq_lock);
1316 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001317 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001318 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001319 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001320
1321 txq->txq_headidx = txq->txq_tailidx = 0;
1322 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1323 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301324 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001325 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301326}
1327
Sujithe8324352009-01-16 21:38:42 +05301328int ath_txq_update(struct ath_softc *sc, int qnum,
1329 struct ath9k_tx_queue_info *qinfo)
1330{
Sujithcbe61d82009-02-09 13:27:12 +05301331 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301332 int error = 0;
1333 struct ath9k_tx_queue_info qi;
1334
1335 if (qnum == sc->beacon.beaconq) {
1336 /*
1337 * XXX: for beacon queue, we just save the parameter.
1338 * It will be picked up by ath_beaconq_config when
1339 * it's necessary.
1340 */
1341 sc->beacon.beacon_qi = *qinfo;
1342 return 0;
1343 }
1344
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001345 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301346
1347 ath9k_hw_get_txq_props(ah, qnum, &qi);
1348 qi.tqi_aifs = qinfo->tqi_aifs;
1349 qi.tqi_cwmin = qinfo->tqi_cwmin;
1350 qi.tqi_cwmax = qinfo->tqi_cwmax;
1351 qi.tqi_burstTime = qinfo->tqi_burstTime;
1352 qi.tqi_readyTime = qinfo->tqi_readyTime;
1353
1354 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001355 ath_err(ath9k_hw_common(sc->sc_ah),
1356 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301357 error = -EIO;
1358 } else {
1359 ath9k_hw_resettxqueue(ah, qnum);
1360 }
1361
1362 return error;
1363}
1364
1365int ath_cabq_update(struct ath_softc *sc)
1366{
1367 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001368 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301369 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301370
1371 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1372 /*
1373 * Ensure the readytime % is within the bounds.
1374 */
Sujith17d79042009-02-09 13:27:03 +05301375 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1376 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1377 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1378 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301379
Steve Brown9814f6b2011-02-07 17:10:39 -07001380 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301381 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301382 ath_txq_update(sc, qnum, &qi);
1383
1384 return 0;
1385}
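
/*
 * Standalone illustration (not driver code) of the CABQ ready-time
 * arithmetic used in ath_cabq_update() above: tqi_readyTime is simply a
 * percentage of the beacon interval.  The bound values below are
 * illustrative assumptions, not the driver's actual definitions.
 */
#include <stdio.h>

#define READY_TIME_LO_BOUND	50	/* assumed lower clamp, in percent */
#define READY_TIME_HI_BOUND	80	/* assumed upper clamp, in percent */

static unsigned int cabq_ready_time(unsigned int beacon_interval_tu,
				    unsigned int ready_pct)
{
	if (ready_pct < READY_TIME_LO_BOUND)
		ready_pct = READY_TIME_LO_BOUND;
	else if (ready_pct > READY_TIME_HI_BOUND)
		ready_pct = READY_TIME_HI_BOUND;

	return beacon_interval_tu * ready_pct / 100;
}

int main(void)
{
	/* 100 TU beacon interval at 80% -> 80 TU of CAB ready time */
	printf("%u\n", cabq_ready_time(100, 80));
	return 0;
}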
1386
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001387static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1388{
1389 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1390 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1391}
1392
Felix Fietkaufce041b2011-05-19 12:20:25 +02001393static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1394 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301395 __releases(txq->axq_lock)
1396 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301397{
1398 struct ath_buf *bf, *lastbf;
1399 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001400 struct ath_tx_status ts;
1401
1402 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301403 INIT_LIST_HEAD(&bf_head);
1404
Felix Fietkaufce041b2011-05-19 12:20:25 +02001405 while (!list_empty(list)) {
1406 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301407
Felix Fietkaufce041b2011-05-19 12:20:25 +02001408 if (bf->bf_stale) {
1409 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301410
Felix Fietkaufce041b2011-05-19 12:20:25 +02001411 ath_tx_return_buffer(sc, bf);
1412 continue;
Sujithe8324352009-01-16 21:38:42 +05301413 }
1414
1415 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001416 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001417
Sujithe8324352009-01-16 21:38:42 +05301418 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001419 if (bf_is_ampdu_not_probing(bf))
1420 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301421
Felix Fietkaufce041b2011-05-19 12:20:25 +02001422 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301423 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001424 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1425 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301426 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001427 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001428 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001429 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001430}
1431
1432/*
1433 * Drain a given TX queue (could be Beacon or Data)
1434 *
1435 * This assumes output has been stopped and
1436 * we do not need to block ath_tx_tasklet.
1437 */
1438void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1439{
1440 spin_lock_bh(&txq->axq_lock);
1441 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1442 int idx = txq->txq_tailidx;
1443
1444 while (!list_empty(&txq->txq_fifo[idx])) {
1445 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1446 retry_tx);
1447
1448 INCR(idx, ATH_TXFIFO_DEPTH);
1449 }
1450 txq->txq_tailidx = idx;
1451 }
1452
1453 txq->axq_link = NULL;
1454 txq->axq_tx_inprogress = false;
1455 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001456
1457 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001458 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1459 ath_txq_drain_pending_buffers(sc, txq);
1460
1461 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301462}
1463
Felix Fietkau080e1a22010-12-05 20:17:53 +01001464bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301465{
Sujithcbe61d82009-02-09 13:27:12 +05301466 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001467 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301468 struct ath_txq *txq;
1469 int i, npend = 0;
1470
1471 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001472 return true;
Sujith043a0402009-01-16 21:38:47 +05301473
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001474 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301475
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001476 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301477 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001478 if (!ATH_TXQ_SETUP(sc, i))
1479 continue;
1480
1481 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301482 }
1483
Felix Fietkau080e1a22010-12-05 20:17:53 +01001484 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001485 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301486
1487 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001488 if (!ATH_TXQ_SETUP(sc, i))
1489 continue;
1490
1491 /*
1492 * The caller will resume queues with ieee80211_wake_queues.
1493 * Mark the queue as not stopped to prevent ath_tx_complete
1494 * from waking the queue too early.
1495 */
1496 txq = &sc->tx.txq[i];
1497 txq->stopped = false;
1498 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301499 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001500
1501 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301502}
1503
Sujithe8324352009-01-16 21:38:42 +05301504void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1505{
1506 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1507 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1508}
1509
Ben Greear7755bad2011-01-18 17:30:00 -08001510/* For each axq_acq entry, for each tid, try to schedule packets
1511 * for transmit until ampdu_depth has reached min Q depth.
1512 */
Sujithe8324352009-01-16 21:38:42 +05301513void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1514{
Ben Greear7755bad2011-01-18 17:30:00 -08001515 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1516 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301517
Felix Fietkau236de512011-09-03 01:40:25 +02001518 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001519 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301520 return;
1521
1522 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001523 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301524
Ben Greear7755bad2011-01-18 17:30:00 -08001525 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1526 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1527 list_del(&ac->list);
1528 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301529
Ben Greear7755bad2011-01-18 17:30:00 -08001530 while (!list_empty(&ac->tid_q)) {
1531 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1532 list);
1533 list_del(&tid->list);
1534 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301535
Ben Greear7755bad2011-01-18 17:30:00 -08001536 if (tid->paused)
1537 continue;
Sujithe8324352009-01-16 21:38:42 +05301538
Ben Greear7755bad2011-01-18 17:30:00 -08001539 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301540
Ben Greear7755bad2011-01-18 17:30:00 -08001541 /*
1542 * add tid to round-robin queue if more frames
1543 * are pending for the tid
1544 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001545 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001546 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301547
Ben Greear7755bad2011-01-18 17:30:00 -08001548 if (tid == last_tid ||
1549 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1550 break;
Sujithe8324352009-01-16 21:38:42 +05301551 }
Ben Greear7755bad2011-01-18 17:30:00 -08001552
1553 if (!list_empty(&ac->tid_q)) {
1554 if (!ac->sched) {
1555 ac->sched = true;
1556 list_add_tail(&ac->list, &txq->axq_acq);
1557 }
1558 }
1559
1560 if (ac == last_ac ||
1561 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1562 return;
Sujithe8324352009-01-16 21:38:42 +05301563 }
1564}
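
/*
 * Minimal standalone sketch (not driver code) of the scheduling policy in
 * ath_txq_schedule() above: walk the pending traffic classes round-robin,
 * hand frames to "hardware" until the queue depth reaches a minimum
 * threshold, and leave anything still pending for the next pass.  This is
 * flattened to a single level with plain arrays; names and numbers are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_TID		4
#define MIN_QDEPTH	2

static int pending[NUM_TID] = { 3, 0, 1, 2 };	/* frames queued per TID */
static int hw_depth;				/* frames handed to "hardware" */

static bool schedule_pass(void)
{
	bool serviced = false;
	int tid;

	for (tid = 0; tid < NUM_TID; tid++) {
		if (!pending[tid])
			continue;

		pending[tid]--;		/* service one frame from this TID */
		hw_depth++;
		serviced = true;
		printf("tid %d -> hw, depth now %d\n", tid, hw_depth);

		if (hw_depth >= MIN_QDEPTH)
			return false;	/* hw queue deep enough, stop early */
	}

	return serviced;		/* keep scheduling only if work remains */
}

int main(void)
{
	while (schedule_pass())
		;
	return 0;
}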
1565
Sujithe8324352009-01-16 21:38:42 +05301566/***********/
1567/* TX, DMA */
1568/***********/
1569
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001570/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001571 * Insert a chain of ath_buf (descriptors) on a txq and
1572 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001573 */
Sujith102e0572008-10-29 10:15:16 +05301574static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001575 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001576{
Sujithcbe61d82009-02-09 13:27:12 +05301577 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001578 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001579 struct ath_buf *bf, *bf_last;
1580 bool puttxbuf = false;
1581 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301582
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583 /*
1584 * Insert the frame on the outbound list and
1585 * pass it on to the hardware.
1586 */
1587
1588 if (list_empty(head))
1589 return;
1590
Felix Fietkaufce041b2011-05-19 12:20:25 +02001591 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001592 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001593 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594
Joe Perches226afe62010-12-02 19:12:37 -08001595 ath_dbg(common, ATH_DBG_QUEUE,
1596 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001597
Felix Fietkaufce041b2011-05-19 12:20:25 +02001598 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1599 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001600 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001601 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001603 list_splice_tail_init(head, &txq->axq_q);
1604
Felix Fietkaufce041b2011-05-19 12:20:25 +02001605 if (txq->axq_link) {
1606 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001607 ath_dbg(common, ATH_DBG_XMIT,
1608 "link[%u] (%p)=%llx (%p)\n",
1609 txq->axq_qnum, txq->axq_link,
1610 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001611 } else if (!edma)
1612 puttxbuf = true;
1613
1614 txq->axq_link = bf_last->bf_desc;
1615 }
1616
1617 if (puttxbuf) {
1618 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1619 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1620 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1621 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1622 }
1623
1624 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001625 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001626 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001627 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001628
1629 if (!internal) {
1630 txq->axq_depth++;
1631 if (bf_is_ampdu_not_probing(bf))
1632 txq->axq_ampdu_depth++;
1633 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001634}
1635
Sujithe8324352009-01-16 21:38:42 +05301636static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001637 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301638{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001639 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001640 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001641 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301642
1643 /*
1644 * Do not queue to h/w when any of the following conditions is true:
1645 * - there are pending frames in software queue
1646 * - the TID is currently paused for ADDBA/BAR request
1647 * - seqno is not within block-ack window
1648 * - h/w queue depth exceeds low water mark
1649 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001650 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001651 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001652 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001653 /*
Sujithe8324352009-01-16 21:38:42 +05301654 * Add this frame to software queue for scheduling later
1655 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001656 */
Ben Greearbda8add2011-01-09 23:11:48 -08001657 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001658 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001659 if (!txctl->an || !txctl->an->sleeping)
1660 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301661 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001662 }
1663
Felix Fietkau44f1d262011-08-28 00:32:25 +02001664 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1665 if (!bf)
1666 return;
1667
Felix Fietkau399c6482011-09-14 21:24:17 +02001668 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001669 INIT_LIST_HEAD(&bf_head);
1670 list_add(&bf->list, &bf_head);
1671
Sujithe8324352009-01-16 21:38:42 +05301672 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001673 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301674
1675 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001676 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301677 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001678 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001679 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301680}
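
/*
 * Standalone sketch (not driver code) of the block-ack window test behind
 * the "seqno is not within block-ack window" condition in
 * ath_tx_send_ampdu() above.  802.11 sequence numbers are 12 bits, so the
 * distance from the window start is taken modulo 4096.  The driver's own
 * BAW_WITHIN macro is defined in a header outside this file; this
 * formulation is an assumption for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEQ_MAX		4096	/* 12-bit sequence space */

static bool baw_within(unsigned int start, unsigned int baw_size,
		       unsigned int seqno)
{
	return ((seqno - start) & (SEQ_MAX - 1)) < baw_size;
}

int main(void)
{
	/* window starts at 4090, 64 entries wide: 10 wraps around and fits */
	printf("%d\n", baw_within(4090, 64, 10));	/* 1: inside window  */
	printf("%d\n", baw_within(4090, 64, 100));	/* 0: outside window */
	return 0;
}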
1681
Felix Fietkau82b873a2010-11-11 03:18:37 +01001682static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001683 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001684{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001685 struct ath_frame_info *fi = get_frame_info(skb);
1686 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301687 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688
Felix Fietkau44f1d262011-08-28 00:32:25 +02001689 bf = fi->bf;
1690 if (!bf)
1691 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1692
1693 if (!bf)
1694 return;
1695
1696 INIT_LIST_HEAD(&bf_head);
1697 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001698 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301699
1700 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001701 if (tid)
1702 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301703
Sujithd43f30152009-01-16 21:38:53 +05301704 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001705 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001706 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301707 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001708}
1709
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001710static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1711 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301712{
1713 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001714 struct ieee80211_sta *sta = tx_info->control.sta;
1715 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001716 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001717 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001718 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001719 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301720
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001721 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301722
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001723 if (sta)
1724 an = (struct ath_node *) sta->drv_priv;
1725
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001726 memset(fi, 0, sizeof(*fi));
1727 if (hw_key)
1728 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001729 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1730 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001731 else
1732 fi->keyix = ATH9K_TXKEYIX_INVALID;
1733 fi->keytype = keytype;
1734 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301735}
1736
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301737u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1738{
1739 struct ath_hw *ah = sc->sc_ah;
1740 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301741 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1742 (curchan->channelFlags & CHANNEL_5GHZ) &&
1743 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301744 return 0x3;
1745 else
1746 return chainmask;
1747}
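
/*
 * Standalone sketch (not driver code) of the chainmask reduction above:
 * with all three TX chains enabled (mask 0x7) on a 5 GHz channel, hardware
 * advertising the relevant power-amplifier capability drops to two chains
 * (mask 0x3) for rate codes below 0x90.  The boolean flags stand in for the
 * driver's capability and channel structures.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned char txchainmask_reduction(bool has_apm_cap, bool is_5ghz,
					   unsigned char chainmask,
					   unsigned int ratecode)
{
	if (has_apm_cap && is_5ghz && chainmask == 0x7 && ratecode < 0x90)
		return 0x3;		/* drop from 3 chains to 2 */

	return chainmask;		/* leave the mask untouched */
}

int main(void)
{
	printf("0x%x\n", txchainmask_reduction(true, true, 0x7, 0x80));	/* 0x3 */
	printf("0x%x\n", txchainmask_reduction(true, true, 0x7, 0x95));	/* 0x7 */
	return 0;
}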
1748
Felix Fietkau44f1d262011-08-28 00:32:25 +02001749/*
 1750 * Assign a descriptor (and a sequence number if necessary),
 1751 * and map the buffer for DMA. Frees the skb on error.
1752 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001753static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001754 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001755 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001756 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301757{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001758 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001759 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001760 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001761 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001762 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001763
1764 bf = ath_tx_get_buffer(sc);
1765 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001766 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001767 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001768 }
Sujithe8324352009-01-16 21:38:42 +05301769
Sujithe8324352009-01-16 21:38:42 +05301770 ATH_TXBUF_RESET(bf);
1771
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772 if (tid) {
1773 seqno = tid->seq_next;
1774 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1775 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1776 bf->bf_state.seqno = seqno;
1777 }
1778
Sujithe8324352009-01-16 21:38:42 +05301779 bf->bf_mpdu = skb;
1780
Ben Greearc1739eb2010-10-14 12:45:29 -07001781 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1782 skb->len, DMA_TO_DEVICE);
1783 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301784 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001785 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001786 ath_err(ath9k_hw_common(sc->sc_ah),
1787 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001788 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001789 goto error;
Sujithe8324352009-01-16 21:38:42 +05301790 }
1791
Felix Fietkau56dc6332011-08-28 00:32:22 +02001792 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001793
1794 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001795
1796error:
1797 dev_kfree_skb_any(skb);
1798 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001799}
1800
1801/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001802static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001803 struct ath_tx_control *txctl)
1804{
Felix Fietkau04caf862010-11-14 15:20:12 +01001805 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1806 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001807 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001808 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001809 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301810
Sujithe8324352009-01-16 21:38:42 +05301811 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301812 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1813 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001814 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1815 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001816 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001817
Felix Fietkau066dae92010-11-07 14:59:39 +01001818 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001819 }
1820
1821 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001822 /*
1823 * Try aggregation if it's a unicast data frame
1824 * and the destination is HT capable.
1825 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001826 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301827 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001828 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1829 if (!bf)
1830 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001831
Felix Fietkau82b873a2010-11-11 03:18:37 +01001832 bf->bf_state.bfs_paprd = txctl->paprd;
1833
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301834 if (txctl->paprd)
1835 bf->bf_state.bfs_paprd_timestamp = jiffies;
1836
Felix Fietkau44f1d262011-08-28 00:32:25 +02001837 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301838 }
1839
Felix Fietkaufa05f872011-08-28 00:32:24 +02001840out:
Sujithe8324352009-01-16 21:38:42 +05301841 spin_unlock_bh(&txctl->txq->axq_lock);
1842}
1843
1844/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001845int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301846 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001847{
Felix Fietkau28d16702010-11-14 15:20:10 +01001848 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1849 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001850 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001851 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001852 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001853 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001854 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001855 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001856 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001857
Ben Greeara9927ba2010-12-06 21:13:49 -08001858 /* NOTE: sta can be NULL according to net/mac80211.h */
1859 if (sta)
1860 txctl->an = (struct ath_node *)sta->drv_priv;
1861
Felix Fietkau04caf862010-11-14 15:20:12 +01001862 if (info->control.hw_key)
1863 frmlen += info->control.hw_key->icv_len;
1864
Felix Fietkau28d16702010-11-14 15:20:10 +01001865 /*
1866 * As a temporary workaround, assign seq# here; this will likely need
1867 * to be cleaned up to work better with Beacon transmission and virtual
1868 * BSSes.
1869 */
1870 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1871 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1872 sc->tx.seq_no += 0x10;
1873 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1874 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1875 }
1876
John W. Linville42cecc32011-09-19 15:42:31 -04001877 /* Add the padding after the header if this is not already done */
1878 padpos = ath9k_cmn_padpos(hdr->frame_control);
1879 padsize = padpos & 3;
1880 if (padsize && skb->len > padpos) {
1881 if (skb_headroom(skb) < padsize)
1882 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001883
John W. Linville42cecc32011-09-19 15:42:31 -04001884 skb_push(skb, padsize);
1885 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc42011-09-15 10:03:12 +02001886 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001887 }
1888
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001889 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1890 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1891 !ieee80211_is_data(hdr->frame_control))
1892 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1893
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001894 setup_frame_info(hw, skb, frmlen);
1895
1896 /*
1897 * At this point, the vif, hw_key and sta pointers in the tx control
 1898	 * info are no longer valid (overwritten by the ath_frame_info data).
1899 */
1900
Felix Fietkau066dae92010-11-07 14:59:39 +01001901 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001902 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001903 if (txq == sc->tx.txq_map[q] &&
1904 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001905 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001906 txq->stopped = 1;
1907 }
1908 spin_unlock_bh(&txq->axq_lock);
1909
Felix Fietkau44f1d262011-08-28 00:32:25 +02001910 ath_tx_start_dma(sc, skb, txctl);
1911 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001912}
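
/*
 * Standalone sketch (not driver code) of the header-padding arithmetic used
 * in ath_tx_start() above and undone again in ath_tx_complete(): the pad
 * size is the 802.11 header length modulo 4, so the frame body ends up
 * 4-byte aligned once the header is moved up by that amount.  Header
 * lengths below are the usual 802.11 values; the function name is
 * illustrative.
 */
#include <stdio.h>

static unsigned int pad_size(unsigned int hdrlen)
{
	return hdrlen & 3;	/* bytes of padding needed after the header */
}

int main(void)
{
	printf("data (24-byte hdr): pad %u\n", pad_size(24));			/* 0 */
	printf("QoS data (26-byte hdr): pad %u\n", pad_size(26));		/* 2 */
	printf("QoS data + HT control (30-byte hdr): pad %u\n", pad_size(30));	/* 2 */
	return 0;
}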
1913
Sujithe8324352009-01-16 21:38:42 +05301914/*****************/
1915/* TX Completion */
1916/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001917
Sujithe8324352009-01-16 21:38:42 +05301918static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301919 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001920{
Sujithe8324352009-01-16 21:38:42 +05301921 struct ieee80211_hw *hw = sc->hw;
1922 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001923 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001924 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001925 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301926
Joe Perches226afe62010-12-02 19:12:37 -08001927 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301928
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301929 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301930 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301931
Felix Fietkau55797b12011-09-14 21:24:16 +02001932 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301933 /* Frame was ACKed */
1934 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301935
John W. Linville42cecc32011-09-19 15:42:31 -04001936 padpos = ath9k_cmn_padpos(hdr->frame_control);
1937 padsize = padpos & 3;
 1938	if (padsize && skb->len > padpos + padsize) {
1939 /*
1940 * Remove MAC header padding before giving the frame back to
1941 * mac80211.
1942 */
1943 memmove(skb->data + padsize, skb->data, padpos);
1944 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301945 }
1946
Sujith1b04b932010-01-08 10:36:05 +05301947 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1948 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001949 ath_dbg(common, ATH_DBG_PS,
1950 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301951 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1952 PS_WAIT_FOR_CAB |
1953 PS_WAIT_FOR_PSPOLL_DATA |
1954 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001955 }
1956
Felix Fietkau7545daf2011-01-24 19:23:16 +01001957 q = skb_get_queue_mapping(skb);
1958 if (txq == sc->tx.txq_map[q]) {
1959 spin_lock_bh(&txq->axq_lock);
1960 if (WARN_ON(--txq->pending_frames < 0))
1961 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001962
Felix Fietkau7545daf2011-01-24 19:23:16 +01001963 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1964 ieee80211_wake_queue(sc->hw, q);
1965 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001966 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001967 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001968 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001969
1970 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301971}
1972
1973static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001974 struct ath_txq *txq, struct list_head *bf_q,
1975 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301976{
1977 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001978 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301979 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301980 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301981
Sujithe8324352009-01-16 21:38:42 +05301982 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301983 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301984
Felix Fietkau55797b12011-09-14 21:24:16 +02001985 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301986 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301987
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001988 if (ts->ts_status & ATH9K_TXERR_FILT)
1989 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1990
Ben Greearc1739eb2010-10-14 12:45:29 -07001991 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001992 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001993
1994 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301995 if (time_after(jiffies,
1996 bf->bf_state.bfs_paprd_timestamp +
1997 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001998 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001999 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002000 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002001 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002002 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302003 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002004 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002005 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2006 * accidentally reference it later.
2007 */
2008 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302009
2010 /*
 2011	 * Return the list of ath_buf of this mpdu to the free queue
2012 */
2013 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2014 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2015 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2016}
2017
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002018static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2019 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002020 int txok)
Sujithc4288392008-11-18 09:09:30 +05302021{
Sujitha22be222009-03-30 15:28:36 +05302022 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302023 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302024 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002025 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002026 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302027 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302028
Sujith95e4acb2009-03-13 08:56:09 +05302029 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002030 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302031
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002032 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302033 WARN_ON(tx_rateindex >= hw->max_rates);
2034
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002035 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002036 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302037
Felix Fietkaub572d032010-11-14 15:20:07 +01002038 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002039 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302040 tx_info->status.ampdu_len = nframes;
2041 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002042
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002044 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002045 /*
2046 * If an underrun error is seen assume it as an excessive
2047 * retry only if max frame trigger level has been reached
2048 * (2 KB for single stream, and 4 KB for dual stream).
2049 * Adjust the long retry as if the frame was tried
2050 * hw->max_rate_tries times to affect how rate control updates
2051 * PER for the failed rate.
 2052		 * In case of congestion on the bus, penalizing this type of
 2053		 * underrun should help the hardware actually transmit new frames
2054 * successfully by eventually preferring slower rates.
2055 * This itself should also alleviate congestion on the bus.
2056 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002057 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2058 ATH9K_TX_DELIM_UNDERRUN)) &&
2059 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002060 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002061 tx_info->status.rates[tx_rateindex].count =
2062 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302063 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302064
Felix Fietkau545750d2009-11-23 22:21:01 +01002065 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302066 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002067 tx_info->status.rates[i].idx = -1;
2068 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302069
Felix Fietkau78c46532010-06-25 01:26:16 +02002070 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302071}
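
/*
 * Standalone sketch (not driver code) of the rate-control feedback fill in
 * ath_tx_rc_status() above: the rate that was actually used reports
 * longretry + 1 attempts, every later rate slot is invalidated, and the
 * A-MPDU counters carry total vs. acknowledged subframes.  The struct below
 * is a local stand-in for mac80211's tx status, not the real definition.
 */
#include <stdio.h>

#define MAX_RATES 4

struct tx_rate_status {
	int idx;	/* rate table index, -1 = unused */
	int count;	/* transmission attempts at this rate */
};

int main(void)
{
	struct tx_rate_status rates[MAX_RATES] = {
		{ .idx = 7, .count = 1 }, { .idx = 5, .count = 1 },
		{ .idx = 3, .count = 1 }, { .idx = 1, .count = 1 },
	};
	int tx_rateindex = 1;	/* hardware stopped at the second rate */
	int ts_longretry = 3;	/* long retries reported by the hardware */
	int nframes = 16, nbad = 3;

	rates[tx_rateindex].count = ts_longretry + 1;
	for (int i = tx_rateindex + 1; i < MAX_RATES; i++) {
		rates[i].count = 0;
		rates[i].idx = -1;
	}

	printf("ampdu_len=%d ampdu_ack_len=%d rate[%d].count=%d\n",
	       nframes, nframes - nbad, tx_rateindex, rates[tx_rateindex].count);
	return 0;
}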
2072
Felix Fietkaufce041b2011-05-19 12:20:25 +02002073static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2074 struct ath_tx_status *ts, struct ath_buf *bf,
2075 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302076 __releases(txq->axq_lock)
2077 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002078{
2079 int txok;
2080
2081 txq->axq_depth--;
2082 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2083 txq->axq_tx_inprogress = false;
2084 if (bf_is_ampdu_not_probing(bf))
2085 txq->axq_ampdu_depth--;
2086
2087 spin_unlock_bh(&txq->axq_lock);
2088
2089 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002090 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002091 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2092 } else
2093 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2094
2095 spin_lock_bh(&txq->axq_lock);
2096
2097 if (sc->sc_flags & SC_OP_TXAGGR)
2098 ath_txq_schedule(sc, txq);
2099}
2100
Sujithc4288392008-11-18 09:09:30 +05302101static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002102{
Sujithcbe61d82009-02-09 13:27:12 +05302103 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002104 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002105 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2106 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302107 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002108 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 int status;
2110
Joe Perches226afe62010-12-02 19:12:37 -08002111 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2112 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2113 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002114
Felix Fietkaufce041b2011-05-19 12:20:25 +02002115 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002117 if (work_pending(&sc->hw_reset_work))
2118 break;
2119
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 if (list_empty(&txq->axq_q)) {
2121 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002122 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002123 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 break;
2125 }
2126 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2127
2128 /*
2129 * There is a race condition that a BH gets scheduled
 2130		 * after sw writes TxE and before hw re-loads the last
2131 * descriptor to get the newly chained one.
2132 * Software must keep the last DONE descriptor as a
2133 * holding descriptor - software does so by marking
2134 * it with the STALE flag.
2135 */
2136 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302137 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002139 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002141
2142 bf = list_entry(bf_held->list.next, struct ath_buf,
2143 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 }
2145
2146 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302147 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148
Felix Fietkau29bffa92010-03-29 20:14:23 -07002149 memset(&ts, 0, sizeof(ts));
2150 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002151 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002152 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002153
Ben Greear2dac4fb2011-01-09 23:11:45 -08002154 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155
2156 /*
2157 * Remove ath_buf's of the same transmit unit from txq,
2158 * however leave the last descriptor back as the holding
2159 * descriptor for hw.
2160 */
Sujitha119cc42009-03-30 15:28:38 +05302161 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 if (!list_is_singular(&lastbf->list))
2164 list_cut_position(&bf_head,
2165 &txq->axq_q, lastbf->list.prev);
2166
Felix Fietkaufce041b2011-05-19 12:20:25 +02002167 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002168 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002169 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 }
Johannes Berge6a98542008-10-21 12:40:02 +02002171
Felix Fietkaufce041b2011-05-19 12:20:25 +02002172 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002174 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002175}
2176
Sujith305fe472009-07-23 15:32:29 +05302177static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002178{
2179 struct ath_softc *sc = container_of(work, struct ath_softc,
2180 tx_complete_work.work);
2181 struct ath_txq *txq;
2182 int i;
2183 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002184#ifdef CONFIG_ATH9K_DEBUGFS
2185 sc->tx_complete_poll_work_seen++;
2186#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002187
2188 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2189 if (ATH_TXQ_SETUP(sc, i)) {
2190 txq = &sc->tx.txq[i];
2191 spin_lock_bh(&txq->axq_lock);
2192 if (txq->axq_depth) {
2193 if (txq->axq_tx_inprogress) {
2194 needreset = true;
2195 spin_unlock_bh(&txq->axq_lock);
2196 break;
2197 } else {
2198 txq->axq_tx_inprogress = true;
2199 }
2200 }
2201 spin_unlock_bh(&txq->axq_lock);
2202 }
2203
2204 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002205 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2206 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002207 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002208 }
2209
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002210 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002211 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2212}
2213
2214
Sujithe8324352009-01-16 21:38:42 +05302215
2216void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002217{
Sujithe8324352009-01-16 21:38:42 +05302218 int i;
2219 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002220
Sujithe8324352009-01-16 21:38:42 +05302221 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002222
2223 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302224 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2225 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002226 }
2227}
2228
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002229void ath_tx_edma_tasklet(struct ath_softc *sc)
2230{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002231 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002232 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2233 struct ath_hw *ah = sc->sc_ah;
2234 struct ath_txq *txq;
2235 struct ath_buf *bf, *lastbf;
2236 struct list_head bf_head;
2237 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002238
2239 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002240 if (work_pending(&sc->hw_reset_work))
2241 break;
2242
Felix Fietkaufce041b2011-05-19 12:20:25 +02002243 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002244 if (status == -EINPROGRESS)
2245 break;
2246 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002247 ath_dbg(common, ATH_DBG_XMIT,
2248 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249 break;
2250 }
2251
2252 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002253 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 continue;
2255
Felix Fietkaufce041b2011-05-19 12:20:25 +02002256 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002257
2258 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002259
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002260 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2261 spin_unlock_bh(&txq->axq_lock);
2262 return;
2263 }
2264
2265 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2266 struct ath_buf, list);
2267 lastbf = bf->bf_lastbf;
2268
2269 INIT_LIST_HEAD(&bf_head);
2270 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2271 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002272
Felix Fietkaufce041b2011-05-19 12:20:25 +02002273 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2274 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002275
Felix Fietkaufce041b2011-05-19 12:20:25 +02002276 if (!list_empty(&txq->axq_q)) {
2277 struct list_head bf_q;
2278
2279 INIT_LIST_HEAD(&bf_q);
2280 txq->axq_link = NULL;
2281 list_splice_tail_init(&txq->axq_q, &bf_q);
2282 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2283 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284 }
2285
Felix Fietkaufce041b2011-05-19 12:20:25 +02002286 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287 spin_unlock_bh(&txq->axq_lock);
2288 }
2289}
2290
Sujithe8324352009-01-16 21:38:42 +05302291/*****************/
2292/* Init, Cleanup */
2293/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002294
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002295static int ath_txstatus_setup(struct ath_softc *sc, int size)
2296{
2297 struct ath_descdma *dd = &sc->txsdma;
2298 u8 txs_len = sc->sc_ah->caps.txs_len;
2299
2300 dd->dd_desc_len = size * txs_len;
2301 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2302 &dd->dd_desc_paddr, GFP_KERNEL);
2303 if (!dd->dd_desc)
2304 return -ENOMEM;
2305
2306 return 0;
2307}
2308
2309static int ath_tx_edma_init(struct ath_softc *sc)
2310{
2311 int err;
2312
2313 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2314 if (!err)
2315 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2316 sc->txsdma.dd_desc_paddr,
2317 ATH_TXSTATUS_RING_SIZE);
2318
2319 return err;
2320}
2321
2322static void ath_tx_edma_cleanup(struct ath_softc *sc)
2323{
2324 struct ath_descdma *dd = &sc->txsdma;
2325
2326 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2327 dd->dd_desc_paddr);
2328}
2329
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002330int ath_tx_init(struct ath_softc *sc, int nbufs)
2331{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002332 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002333 int error = 0;
2334
Sujith797fe5c2009-03-30 15:28:45 +05302335 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336
Sujith797fe5c2009-03-30 15:28:45 +05302337 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002338 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302339 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002340 ath_err(common,
2341 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302342 goto err;
2343 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344
Sujith797fe5c2009-03-30 15:28:45 +05302345 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002346 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302347 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002348 ath_err(common,
2349 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302350 goto err;
2351 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002353 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2354
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002355 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2356 error = ath_tx_edma_init(sc);
2357 if (error)
2358 goto err;
2359 }
2360
Sujith797fe5c2009-03-30 15:28:45 +05302361err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362 if (error != 0)
2363 ath_tx_cleanup(sc);
2364
2365 return error;
2366}
2367
Sujith797fe5c2009-03-30 15:28:45 +05302368void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369{
Sujithb77f4832008-12-07 21:44:03 +05302370 if (sc->beacon.bdma.dd_desc_len != 0)
2371 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372
Sujithb77f4832008-12-07 21:44:03 +05302373 if (sc->tx.txdma.dd_desc_len != 0)
2374 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002375
2376 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2377 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002378}
2379
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2381{
Sujithc5170162008-10-29 10:13:59 +05302382 struct ath_atx_tid *tid;
2383 struct ath_atx_ac *ac;
2384 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385
Sujith8ee5afb2008-12-07 21:43:36 +05302386 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302387 tidno < WME_NUM_TID;
2388 tidno++, tid++) {
2389 tid->an = an;
2390 tid->tidno = tidno;
2391 tid->seq_start = tid->seq_next = 0;
2392 tid->baw_size = WME_MAX_BA;
2393 tid->baw_head = tid->baw_tail = 0;
2394 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302395 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302396 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002397 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302398 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302399 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302400 tid->state &= ~AGGR_ADDBA_COMPLETE;
2401 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302402 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403
Sujith8ee5afb2008-12-07 21:43:36 +05302404 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302405 acno < WME_NUM_AC; acno++, ac++) {
2406 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002407 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302408 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409 }
2410}
2411
Sujithb5aa9bf2008-10-29 10:13:31 +05302412void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002413{
Felix Fietkau2b409942010-07-07 19:42:08 +02002414 struct ath_atx_ac *ac;
2415 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002416 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002417 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302418
Felix Fietkau2b409942010-07-07 19:42:08 +02002419 for (tidno = 0, tid = &an->tid[tidno];
2420 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002421
Felix Fietkau2b409942010-07-07 19:42:08 +02002422 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002423 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424
Felix Fietkau2b409942010-07-07 19:42:08 +02002425 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426
Felix Fietkau2b409942010-07-07 19:42:08 +02002427 if (tid->sched) {
2428 list_del(&tid->list);
2429 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002430 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002431
2432 if (ac->sched) {
2433 list_del(&ac->list);
2434 tid->ac->sched = false;
2435 }
2436
2437 ath_tid_drain(sc, txq, tid);
2438 tid->state &= ~AGGR_ADDBA_COMPLETE;
2439 tid->state &= ~AGGR_CLEANUP;
2440
2441 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002442 }
2443}