/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

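/*
 * Illustrative check of the timing macros above: with the long GI an
 * OFDM symbol lasts 4 us, so SYMBOL_TIME(250) = 1000 us; with the short
 * GI a symbol lasts 3.6 us, so SYMBOL_TIME_HALFGI(250) = 900 us.
 */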

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280,  25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896,  38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488,  51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044,  26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052,  53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016,  65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532,  65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272,  29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504,  59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532,  65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532,  65532,  65532,  65532,  65532,  65532,
	}
};

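/*
 * Example of how the table above is used: ath_max_4ms_framelen[MCS_HT20][7]
 * is 32172 bytes, roughly what a 65 Mbit/s stream (MCS 7, 20 MHz, long GI)
 * can move in 4 ms (65e6 * 0.004 / 8 = 32500 bytes), rounded down to stay
 * within the limit.
 */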
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

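/*
 * Example of the block-ack window bookkeeping above (illustrative values):
 * with tid->seq_start == 100, a subframe with seqno 103 maps to index 3 and
 * occupies slot (baw_head + 3) & (ATH_TID_MAX_BUFS - 1) in tid->tx_buf.
 * When the oldest outstanding subframe completes, ath_tx_update_baw()
 * clears its bit and slides seq_start/baw_head forward past every
 * already-completed slot.
 */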
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if the rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

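/*
 * Worked example for the density clamp above (illustrative numbers): at
 * MCS 15 (two streams), 40 MHz, full GI and an mpdudensity of 8 us,
 * nsymbols = 8 / 4 = 2 and nsymbits = 540 * 2 = 1080, so minlen =
 * 2 * 1080 / 8 = 270 bytes. A shorter subframe gets an extra
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ delimiters on top of the
 * standard amount.
 */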
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/HT training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

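/*
 * Example for the duration math above (illustrative numbers): a 1500-byte
 * MPDU at MCS 7, 20 MHz, long GI gives nbits = 12000 + 22 = 12022,
 * nsymbits = 260, nsymbols = 47, so the payload takes SYMBOL_TIME(47) =
 * 188 us plus 36 us of training/signal fields (L-STF + L-LTF + L-SIG +
 * HT-SIG + HT-STF + one HT-LTF) for a single stream.
 */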
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

Sujithe8324352009-01-16 21:38:42 +05301236/********************/
1237/* Queue Management */
1238/********************/
1239
Sujithe8324352009-01-16 21:38:42 +05301240static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1241 struct ath_txq *txq)
1242{
1243 struct ath_atx_ac *ac, *ac_tmp;
1244 struct ath_atx_tid *tid, *tid_tmp;
1245
1246 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1247 list_del(&ac->list);
1248 ac->sched = false;
1249 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1250 list_del(&tid->list);
1251 tid->sched = false;
1252 ath_tid_drain(sc, txq, tid);
1253 }
1254 }
1255}
1256
1257struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1258{
Sujithcbe61d82009-02-09 13:27:12 +05301259 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001260 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301261 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001262 static const int subtype_txq_to_hwq[] = {
1263 [WME_AC_BE] = ATH_TXQ_AC_BE,
1264 [WME_AC_BK] = ATH_TXQ_AC_BK,
1265 [WME_AC_VI] = ATH_TXQ_AC_VI,
1266 [WME_AC_VO] = ATH_TXQ_AC_VO,
1267 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001268 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301269
1270 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001271 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301272 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1273 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1274 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1275 qi.tqi_physCompBuf = 0;
1276
1277 /*
1278 * Enable interrupts only for EOL and DESC conditions.
1279 * We mark tx descriptors to receive a DESC interrupt
1280	 * when a tx queue gets deep; otherwise we wait for the
1281	 * EOL interrupt to reap descriptors. Note that this is done to
1282 * reduce interrupt load and this only defers reaping
1283 * descriptors, never transmitting frames. Aside from
1284 * reducing interrupts this also permits more concurrency.
1285 * The only potential downside is if the tx queue backs
1286	 * up, in which case the top half of the kernel may back up
1287 * due to a lack of tx descriptors.
1288 *
1289 * The UAPSD queue is an exception, since we take a desc-
1290 * based intr on the EOSP frames.
1291 */
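	/*
	 * A rough summary of the interrupt policy implemented just below
	 * (a sketch derived from the qflags assignments, not an exhaustive
	 * description):
	 *   - EDMA (AR93xx) chips:   TXOK | TXERR interrupts
	 *   - UAPSD queue:           TXDESC interrupts only
	 *   - all other data queues: TXEOL | TXDESC interrupts
	 */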
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001292 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1293 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1294 TXQ_FLAG_TXERRINT_ENABLE;
1295 } else {
1296 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1297 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1298 else
1299 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1300 TXQ_FLAG_TXDESCINT_ENABLE;
1301 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001302 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1303 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301304 /*
1305 * NB: don't print a message, this happens
1306 * normally on parts with too few tx queues
1307 */
1308 return NULL;
1309 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001310 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001311 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001312 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1313 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301314 return NULL;
1315 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001316 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1317 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301318
Ben Greear60f2d1d2011-01-09 23:11:52 -08001319 txq->axq_qnum = axq_qnum;
1320 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301321 txq->axq_link = NULL;
1322 INIT_LIST_HEAD(&txq->axq_q);
1323 INIT_LIST_HEAD(&txq->axq_acq);
1324 spin_lock_init(&txq->axq_lock);
1325 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001326 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001327 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001328 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001329
1330 txq->txq_headidx = txq->txq_tailidx = 0;
1331 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1332 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301333 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001334 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301335}
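/*
 * Illustrative (hypothetical) use of ath_txq_setup() above; a caller would
 * typically create one data queue per WME access category, e.g.:
 *
 *	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, WME_AC_BE);
 *	if (!txq)
 *		goto err;	(error handling is caller-specific)
 */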
1336
Sujithe8324352009-01-16 21:38:42 +05301337int ath_txq_update(struct ath_softc *sc, int qnum,
1338 struct ath9k_tx_queue_info *qinfo)
1339{
Sujithcbe61d82009-02-09 13:27:12 +05301340 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301341 int error = 0;
1342 struct ath9k_tx_queue_info qi;
1343
1344 if (qnum == sc->beacon.beaconq) {
1345 /*
1346 * XXX: for beacon queue, we just save the parameter.
1347 * It will be picked up by ath_beaconq_config when
1348 * it's necessary.
1349 */
1350 sc->beacon.beacon_qi = *qinfo;
1351 return 0;
1352 }
1353
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001354 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301355
1356 ath9k_hw_get_txq_props(ah, qnum, &qi);
1357 qi.tqi_aifs = qinfo->tqi_aifs;
1358 qi.tqi_cwmin = qinfo->tqi_cwmin;
1359 qi.tqi_cwmax = qinfo->tqi_cwmax;
1360 qi.tqi_burstTime = qinfo->tqi_burstTime;
1361 qi.tqi_readyTime = qinfo->tqi_readyTime;
1362
1363 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001364 ath_err(ath9k_hw_common(sc->sc_ah),
1365 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301366 error = -EIO;
1367 } else {
1368 ath9k_hw_resettxqueue(ah, qnum);
1369 }
1370
1371 return error;
1372}
1373
1374int ath_cabq_update(struct ath_softc *sc)
1375{
1376 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001377 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301378 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301379
1380 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1381 /*
1382 * Ensure the readytime % is within the bounds.
1383 */
Sujith17d79042009-02-09 13:27:03 +05301384 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1385 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1386 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1387 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301388
Steve Brown9814f6b2011-02-07 17:10:39 -07001389 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301390 sc->config.cabqReadytime) / 100;
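	/*
	 * Worked example (illustrative numbers only): with a beacon interval
	 * of 100 TU and cabqReadytime of 10%, the statement above yields a
	 * ready time of 100 * 10 / 100 = 10, i.e. 10% of the beacon interval
	 * in the same units.
	 */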
Sujithe8324352009-01-16 21:38:42 +05301391 ath_txq_update(sc, qnum, &qi);
1392
1393 return 0;
1394}
1395
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001396static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1397{
1398 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1399 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1400}
1401
Felix Fietkaufce041b2011-05-19 12:20:25 +02001402static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1403 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301404 __releases(txq->axq_lock)
1405 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301406{
1407 struct ath_buf *bf, *lastbf;
1408 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001409 struct ath_tx_status ts;
1410
1411 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301412 INIT_LIST_HEAD(&bf_head);
1413
Felix Fietkaufce041b2011-05-19 12:20:25 +02001414 while (!list_empty(list)) {
1415 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301416
Felix Fietkaufce041b2011-05-19 12:20:25 +02001417 if (bf->bf_stale) {
1418 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301419
Felix Fietkaufce041b2011-05-19 12:20:25 +02001420 ath_tx_return_buffer(sc, bf);
1421 continue;
Sujithe8324352009-01-16 21:38:42 +05301422 }
1423
1424 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001425 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001426
Sujithe8324352009-01-16 21:38:42 +05301427 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001428 if (bf_is_ampdu_not_probing(bf))
1429 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301430
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301432 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001433 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1434 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301435 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001436 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001437 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001438 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001439}
1440
1441/*
1442 * Drain a given TX queue (could be Beacon or Data)
1443 *
1444 * This assumes output has been stopped and
1445 * we do not need to block ath_tx_tasklet.
1446 */
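/*
 * Sketch of the flow below: on EDMA hardware every txq_fifo slot is drained
 * first, the axq_q list is drained in either case, and finally, if
 * aggregation is enabled and this is not a retry flush, frames still
 * sitting in the per-TID software queues are dropped as well.
 */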
1447void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1448{
1449 spin_lock_bh(&txq->axq_lock);
1450 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1451 int idx = txq->txq_tailidx;
1452
1453 while (!list_empty(&txq->txq_fifo[idx])) {
1454 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1455 retry_tx);
1456
1457 INCR(idx, ATH_TXFIFO_DEPTH);
1458 }
1459 txq->txq_tailidx = idx;
1460 }
1461
1462 txq->axq_link = NULL;
1463 txq->axq_tx_inprogress = false;
1464 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001465
1466 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001467 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1468 ath_txq_drain_pending_buffers(sc, txq);
1469
1470 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301471}
1472
Felix Fietkau080e1a22010-12-05 20:17:53 +01001473bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301474{
Sujithcbe61d82009-02-09 13:27:12 +05301475 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001476 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301477 struct ath_txq *txq;
1478 int i, npend = 0;
1479
1480 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001481 return true;
Sujith043a0402009-01-16 21:38:47 +05301482
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001483 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301484
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001485 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301486 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001487 if (!ATH_TXQ_SETUP(sc, i))
1488 continue;
1489
1490 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301491 }
1492
Felix Fietkau080e1a22010-12-05 20:17:53 +01001493 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001494 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301495
1496 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001497 if (!ATH_TXQ_SETUP(sc, i))
1498 continue;
1499
1500 /*
1501 * The caller will resume queues with ieee80211_wake_queues.
1502 * Mark the queue as not stopped to prevent ath_tx_complete
1503 * from waking the queue too early.
1504 */
1505 txq = &sc->tx.txq[i];
1506 txq->stopped = false;
1507 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301508 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001509
1510 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301511}
1512
Sujithe8324352009-01-16 21:38:42 +05301513void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1514{
1515 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1516 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1517}
1518
Ben Greear7755bad2011-01-18 17:30:00 -08001519/* For each axq_acq entry, for each tid, try to schedule packets
1520 * for transmit until ampdu_depth has reached min Q depth.
1521 */
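/*
 * Rough outline of the rotation below (derived from the code, not a
 * normative description): ACs are taken from the head of axq_acq and each
 * of their TIDs gets a chance to form aggregates; TIDs that still have
 * queued frames are re-added via ath_tx_queue_tid() and ACs with remaining
 * TIDs go back to the tail of axq_acq, so servicing resumes where it left
 * off once axq_ampdu_depth drops below ATH_AGGR_MIN_QDEPTH again.
 */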
Sujithe8324352009-01-16 21:38:42 +05301522void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1523{
Ben Greear7755bad2011-01-18 17:30:00 -08001524 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1525 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301526
Felix Fietkau236de512011-09-03 01:40:25 +02001527 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001528 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301529 return;
1530
1531 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001532 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301533
Ben Greear7755bad2011-01-18 17:30:00 -08001534 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1535 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1536 list_del(&ac->list);
1537 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301538
Ben Greear7755bad2011-01-18 17:30:00 -08001539 while (!list_empty(&ac->tid_q)) {
1540 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1541 list);
1542 list_del(&tid->list);
1543 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301544
Ben Greear7755bad2011-01-18 17:30:00 -08001545 if (tid->paused)
1546 continue;
Sujithe8324352009-01-16 21:38:42 +05301547
Ben Greear7755bad2011-01-18 17:30:00 -08001548 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301549
Ben Greear7755bad2011-01-18 17:30:00 -08001550 /*
1551 * add tid to round-robin queue if more frames
1552 * are pending for the tid
1553 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001554 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001555 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301556
Ben Greear7755bad2011-01-18 17:30:00 -08001557 if (tid == last_tid ||
1558 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1559 break;
Sujithe8324352009-01-16 21:38:42 +05301560 }
Ben Greear7755bad2011-01-18 17:30:00 -08001561
1562 if (!list_empty(&ac->tid_q)) {
1563 if (!ac->sched) {
1564 ac->sched = true;
1565 list_add_tail(&ac->list, &txq->axq_acq);
1566 }
1567 }
1568
1569 if (ac == last_ac ||
1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1571 return;
Sujithe8324352009-01-16 21:38:42 +05301572 }
1573}
1574
Sujithe8324352009-01-16 21:38:42 +05301575/***********/
1576/* TX, DMA */
1577/***********/
1578
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001579/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580 * Insert a chain of ath_buf (descriptors) on a txq and
1581 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001582 */
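/*
 * Sketch of the two hardware paths taken below: on EDMA (AR93xx) chips the
 * chain is normally appended to the current txq_fifo slot and handed to the
 * MAC with ath9k_hw_puttxbuf(); on older chips it is spliced onto axq_q and
 * either linked from the previous last descriptor or, if the queue was
 * idle, handed over with ath9k_hw_puttxbuf(), after which ath9k_hw_txstart()
 * (re)starts DMA.
 */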
Sujith102e0572008-10-29 10:15:16 +05301583static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001584 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001585{
Sujithcbe61d82009-02-09 13:27:12 +05301586 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001587 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001588 struct ath_buf *bf, *bf_last;
1589 bool puttxbuf = false;
1590 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301591
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001592 /*
1593 * Insert the frame on the outbound list and
1594 * pass it on to the hardware.
1595 */
1596
1597 if (list_empty(head))
1598 return;
1599
Felix Fietkaufce041b2011-05-19 12:20:25 +02001600 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001602 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001603
Joe Perches226afe62010-12-02 19:12:37 -08001604 ath_dbg(common, ATH_DBG_QUEUE,
1605 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001606
Felix Fietkaufce041b2011-05-19 12:20:25 +02001607 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1608 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001609 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001610 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001611 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001612 list_splice_tail_init(head, &txq->axq_q);
1613
Felix Fietkaufce041b2011-05-19 12:20:25 +02001614 if (txq->axq_link) {
1615 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001616 ath_dbg(common, ATH_DBG_XMIT,
1617 "link[%u] (%p)=%llx (%p)\n",
1618 txq->axq_qnum, txq->axq_link,
1619 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001620 } else if (!edma)
1621 puttxbuf = true;
1622
1623 txq->axq_link = bf_last->bf_desc;
1624 }
1625
1626 if (puttxbuf) {
1627 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1628 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1629 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1630 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1631 }
1632
1633 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001634 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001635 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001637
1638 if (!internal) {
1639 txq->axq_depth++;
1640 if (bf_is_ampdu_not_probing(bf))
1641 txq->axq_ampdu_depth++;
1642 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001643}
1644
Sujithe8324352009-01-16 21:38:42 +05301645static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001646 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301647{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001648 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001649 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001650 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301651
1652 /*
1653 * Do not queue to h/w when any of the following conditions is true:
1654 * - there are pending frames in software queue
1655 * - the TID is currently paused for ADDBA/BAR request
1656 * - seqno is not within block-ack window
1657 * - h/w queue depth exceeds low water mark
1658 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001659 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001660 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001661 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001662 /*
Sujithe8324352009-01-16 21:38:42 +05301663 * Add this frame to software queue for scheduling later
1664 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001665 */
Ben Greearbda8add2011-01-09 23:11:48 -08001666 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001667 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001668 if (!txctl->an || !txctl->an->sleeping)
1669 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301670 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001671 }
1672
Felix Fietkau44f1d262011-08-28 00:32:25 +02001673 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1674 if (!bf)
1675 return;
1676
Felix Fietkau399c6482011-09-14 21:24:17 +02001677 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001678 INIT_LIST_HEAD(&bf_head);
1679 list_add(&bf->list, &bf_head);
1680
Sujithe8324352009-01-16 21:38:42 +05301681 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001682 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301683
1684 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001685 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301686 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001687 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001688 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301689}
1690
Felix Fietkau82b873a2010-11-11 03:18:37 +01001691static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001692 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001694 struct ath_frame_info *fi = get_frame_info(skb);
1695 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301696 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001697
Felix Fietkau44f1d262011-08-28 00:32:25 +02001698 bf = fi->bf;
1699 if (!bf)
1700 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1701
1702 if (!bf)
1703 return;
1704
1705 INIT_LIST_HEAD(&bf_head);
1706 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001707 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301708
1709 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001710 if (tid)
1711 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301712
Sujithd43f30152009-01-16 21:38:53 +05301713 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001714 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001715 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301716 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001717}
1718
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001719static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1720 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301721{
1722 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001723 struct ieee80211_sta *sta = tx_info->control.sta;
1724 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001725 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001726 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001727 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001728 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301729
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301731
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001732 if (sta)
1733 an = (struct ath_node *) sta->drv_priv;
1734
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001735 memset(fi, 0, sizeof(*fi));
1736 if (hw_key)
1737 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001738 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1739 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001740 else
1741 fi->keyix = ATH9K_TXKEYIX_INVALID;
1742 fi->keytype = keytype;
1743 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301744}
1745
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301746u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1747{
1748 struct ath_hw *ah = sc->sc_ah;
1749 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301750 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1751 (curchan->channelFlags & CHANNEL_5GHZ) &&
1752 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301753 return 0x3;
1754 else
1755 return chainmask;
1756}
1757
Felix Fietkau44f1d262011-08-28 00:32:25 +02001758/*
1759 * Assign a descriptor (and a sequence number if necessary) and map the
1760 * buffer for DMA. Frees the skb on error.
1761 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001762static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001763 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001764 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001765 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301766{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001767 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001768 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001770 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001771 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001772
1773 bf = ath_tx_get_buffer(sc);
1774 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001775 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001776 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001777 }
Sujithe8324352009-01-16 21:38:42 +05301778
Sujithe8324352009-01-16 21:38:42 +05301779 ATH_TXBUF_RESET(bf);
1780
Felix Fietkaufa05f872011-08-28 00:32:24 +02001781 if (tid) {
1782 seqno = tid->seq_next;
1783 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1784 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1785 bf->bf_state.seqno = seqno;
1786 }
1787
Sujithe8324352009-01-16 21:38:42 +05301788 bf->bf_mpdu = skb;
1789
Ben Greearc1739eb32010-10-14 12:45:29 -07001790 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1791 skb->len, DMA_TO_DEVICE);
1792 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301793 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001794 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001795 ath_err(ath9k_hw_common(sc->sc_ah),
1796 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001797 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001798 goto error;
Sujithe8324352009-01-16 21:38:42 +05301799 }
1800
Felix Fietkau56dc6332011-08-28 00:32:22 +02001801 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001802
1803 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001804
1805error:
1806 dev_kfree_skb_any(skb);
1807 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001808}
1809
1810/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001811static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001812 struct ath_tx_control *txctl)
1813{
Felix Fietkau04caf862010-11-14 15:20:12 +01001814 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1815 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001816 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001817 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301819
Sujithe8324352009-01-16 21:38:42 +05301820 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301821 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1822 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001823 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1824 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001825 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001826
Felix Fietkau066dae92010-11-07 14:59:39 +01001827 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001828 }
1829
1830 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001831 /*
1832 * Try aggregation if it's a unicast data frame
1833 * and the destination is HT capable.
1834 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001835 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301836 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001837 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1838 if (!bf)
1839 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001840
Felix Fietkau82b873a2010-11-11 03:18:37 +01001841 bf->bf_state.bfs_paprd = txctl->paprd;
1842
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301843 if (txctl->paprd)
1844 bf->bf_state.bfs_paprd_timestamp = jiffies;
1845
Felix Fietkau44f1d262011-08-28 00:32:25 +02001846 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301847 }
1848
Felix Fietkaufa05f872011-08-28 00:32:24 +02001849out:
Sujithe8324352009-01-16 21:38:42 +05301850 spin_unlock_bh(&txctl->txq->axq_lock);
1851}
1852
1853/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001854int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301855 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001856{
Felix Fietkau28d16702010-11-14 15:20:10 +01001857 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1858 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001859 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001860 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001861 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001862 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001863 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001864 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001865 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001866
Ben Greeara9927ba2010-12-06 21:13:49 -08001867 /* NOTE: sta can be NULL according to net/mac80211.h */
1868 if (sta)
1869 txctl->an = (struct ath_node *)sta->drv_priv;
1870
Felix Fietkau04caf862010-11-14 15:20:12 +01001871 if (info->control.hw_key)
1872 frmlen += info->control.hw_key->icv_len;
1873
Felix Fietkau28d16702010-11-14 15:20:10 +01001874 /*
1875 * As a temporary workaround, assign seq# here; this will likely need
1876 * to be cleaned up to work better with Beacon transmission and virtual
1877 * BSSes.
1878 */
1879 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1880 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1881 sc->tx.seq_no += 0x10;
1882 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1883 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1884 }
1885
1886 /* Add the padding after the header if this is not already done */
1887 padpos = ath9k_cmn_padpos(hdr->frame_control);
1888 padsize = padpos & 3;
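	/*
	 * Illustrative example: for a regular QoS data frame the header ends
	 * at offset 26, so padpos = 26 and padsize = 26 & 3 = 2; the two
	 * bytes pushed below end up between the 802.11 header and the
	 * payload.
	 */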
1889 if (padsize && skb->len > padpos) {
1890 if (skb_headroom(skb) < padsize)
1891 return -ENOMEM;
1892
1893 skb_push(skb, padsize);
1894 memmove(skb->data, skb->data + padsize, padpos);
1895 }
1896
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001897 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1898 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1899 !ieee80211_is_data(hdr->frame_control))
1900 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1901
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001902 setup_frame_info(hw, skb, frmlen);
1903
1904 /*
1905 * At this point, the vif, hw_key and sta pointers in the tx control
1906	 * info are no longer valid (overwritten by the ath_frame_info data).
1907 */
1908
Felix Fietkau066dae92010-11-07 14:59:39 +01001909 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001910 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001911 if (txq == sc->tx.txq_map[q] &&
1912 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001913 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001914 txq->stopped = 1;
1915 }
1916 spin_unlock_bh(&txq->axq_lock);
1917
Felix Fietkau44f1d262011-08-28 00:32:25 +02001918 ath_tx_start_dma(sc, skb, txctl);
1919 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001920}
1921
Sujithe8324352009-01-16 21:38:42 +05301922/*****************/
1923/* TX Completion */
1924/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001925
Sujithe8324352009-01-16 21:38:42 +05301926static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301927 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001928{
Sujithe8324352009-01-16 21:38:42 +05301929 struct ieee80211_hw *hw = sc->hw;
1930 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001931 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001932 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001933 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301934
Joe Perches226afe62010-12-02 19:12:37 -08001935 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301936
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301937 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301938 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301939
Felix Fietkau55797b12011-09-14 21:24:16 +02001940 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301941 /* Frame was ACKed */
1942 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301943
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001944 padpos = ath9k_cmn_padpos(hdr->frame_control);
1945 padsize = padpos & 3;
1946	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301947 /*
1948 * Remove MAC header padding before giving the frame back to
1949 * mac80211.
1950 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001951 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301952 skb_pull(skb, padsize);
1953 }
1954
Sujith1b04b932010-01-08 10:36:05 +05301955 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1956 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001957 ath_dbg(common, ATH_DBG_PS,
1958 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301959 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1960 PS_WAIT_FOR_CAB |
1961 PS_WAIT_FOR_PSPOLL_DATA |
1962 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001963 }
1964
Felix Fietkau7545daf2011-01-24 19:23:16 +01001965 q = skb_get_queue_mapping(skb);
1966 if (txq == sc->tx.txq_map[q]) {
1967 spin_lock_bh(&txq->axq_lock);
1968 if (WARN_ON(--txq->pending_frames < 0))
1969 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001970
Felix Fietkau7545daf2011-01-24 19:23:16 +01001971 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1972 ieee80211_wake_queue(sc->hw, q);
1973 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001974 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001975 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001976 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001977
1978 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301979}
1980
1981static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001982 struct ath_txq *txq, struct list_head *bf_q,
1983 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301984{
1985 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301986 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301987 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301988
Sujithe8324352009-01-16 21:38:42 +05301989 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301990 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301991
Felix Fietkau55797b12011-09-14 21:24:16 +02001992 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301993 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301994
Ben Greearc1739eb32010-10-14 12:45:29 -07001995 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001996 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001997
1998 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301999 if (time_after(jiffies,
2000 bf->bf_state.bfs_paprd_timestamp +
2001 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002002 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002003 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002004 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002005 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002006 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302007 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002008 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002009 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2010 * accidentally reference it later.
2011 */
2012 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302013
2014 /*
2015	 * Return the list of ath_buf for this mpdu to the free queue
2016 */
2017 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2018 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2019 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2020}
2021
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002022static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2023 struct ath_tx_status *ts, int nframes, int nbad,
2024 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302025{
Sujitha22be222009-03-30 15:28:36 +05302026 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302027 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302028 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002029 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002030 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302031 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302032
Sujith95e4acb2009-03-13 08:56:09 +05302033 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002034 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302035
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002036 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302037 WARN_ON(tx_rateindex >= hw->max_rates);
2038
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002039 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302040 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002041 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002042 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302043
Felix Fietkaub572d032010-11-14 15:20:07 +01002044 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002045
Felix Fietkaub572d032010-11-14 15:20:07 +01002046 tx_info->status.ampdu_len = nframes;
2047 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002048 }
2049
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002050 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau493cf042011-09-14 21:24:22 +02002051 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002052 /*
2053		 * If an underrun error is seen, treat it as an excessive
2054		 * retry only if the max frame trigger level has been reached
2055 * (2 KB for single stream, and 4 KB for dual stream).
2056 * Adjust the long retry as if the frame was tried
2057 * hw->max_rate_tries times to affect how rate control updates
2058 * PER for the failed rate.
2059		 * In case of congestion on the bus, penalizing this type of
2060		 * underrun should help the hardware actually transmit new frames
2061 * successfully by eventually preferring slower rates.
2062 * This itself should also alleviate congestion on the bus.
2063 */
2064 if (ieee80211_is_data(hdr->frame_control) &&
2065 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2066 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002067 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002068 tx_info->status.rates[tx_rateindex].count =
2069 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302070 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302071
Felix Fietkau545750d2009-11-23 22:21:01 +01002072 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302073 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002074 tx_info->status.rates[i].idx = -1;
2075 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302076
Felix Fietkau78c46532010-06-25 01:26:16 +02002077 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302078}
2079
Felix Fietkaufce041b2011-05-19 12:20:25 +02002080static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2081 struct ath_tx_status *ts, struct ath_buf *bf,
2082 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302083 __releases(txq->axq_lock)
2084 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002085{
2086 int txok;
2087
2088 txq->axq_depth--;
2089 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2090 txq->axq_tx_inprogress = false;
2091 if (bf_is_ampdu_not_probing(bf))
2092 txq->axq_ampdu_depth--;
2093
2094 spin_unlock_bh(&txq->axq_lock);
2095
2096 if (!bf_isampdu(bf)) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002097 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2098 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2099 } else
2100 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2101
2102 spin_lock_bh(&txq->axq_lock);
2103
2104 if (sc->sc_flags & SC_OP_TXAGGR)
2105 ath_txq_schedule(sc, txq);
2106}
2107
Sujithc4288392008-11-18 09:09:30 +05302108static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109{
Sujithcbe61d82009-02-09 13:27:12 +05302110 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002111 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2113 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302114 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002115 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 int status;
2117
Joe Perches226afe62010-12-02 19:12:37 -08002118 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2119 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2120 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121
Felix Fietkaufce041b2011-05-19 12:20:25 +02002122 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002124 if (work_pending(&sc->hw_reset_work))
2125 break;
2126
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 if (list_empty(&txq->axq_q)) {
2128 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002129 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002130 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 break;
2132 }
2133 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2134
2135 /*
2136		 * There is a race condition where a BH gets scheduled
2137		 * after sw writes TxE and before hw re-loads the last
2138		 * descriptor to get the newly chained one.
2139 * Software must keep the last DONE descriptor as a
2140 * holding descriptor - software does so by marking
2141 * it with the STALE flag.
2142 */
2143 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302144 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002146 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002148
2149 bf = list_entry(bf_held->list.next, struct ath_buf,
2150 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 }
2152
2153 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302154 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155
Felix Fietkau29bffa92010-03-29 20:14:23 -07002156 memset(&ts, 0, sizeof(ts));
2157 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002158 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002160
Ben Greear2dac4fb2011-01-09 23:11:45 -08002161 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162
2163 /*
2164		 * Remove the ath_bufs of the same transmit unit from the txq,
2165		 * but leave the last descriptor behind as the holding
2166		 * descriptor for hw.
2167 */
Sujitha119cc42009-03-30 15:28:38 +05302168 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 if (!list_is_singular(&lastbf->list))
2171 list_cut_position(&bf_head,
2172 &txq->axq_q, lastbf->list.prev);
2173
Felix Fietkaufce041b2011-05-19 12:20:25 +02002174 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002175 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002176 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 }
Johannes Berge6a98542008-10-21 12:40:02 +02002178
Felix Fietkaufce041b2011-05-19 12:20:25 +02002179 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002181 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182}
2183
Sujith305fe472009-07-23 15:32:29 +05302184static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002185{
2186 struct ath_softc *sc = container_of(work, struct ath_softc,
2187 tx_complete_work.work);
2188 struct ath_txq *txq;
2189 int i;
2190 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002191#ifdef CONFIG_ATH9K_DEBUGFS
2192 sc->tx_complete_poll_work_seen++;
2193#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002194
2195 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2196 if (ATH_TXQ_SETUP(sc, i)) {
2197 txq = &sc->tx.txq[i];
2198 spin_lock_bh(&txq->axq_lock);
2199 if (txq->axq_depth) {
2200 if (txq->axq_tx_inprogress) {
2201 needreset = true;
2202 spin_unlock_bh(&txq->axq_lock);
2203 break;
2204 } else {
2205 txq->axq_tx_inprogress = true;
2206 }
2207 }
2208 spin_unlock_bh(&txq->axq_lock);
2209 }
2210
2211 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002212 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2213 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002214 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002215 }
2216
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002217 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002218 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2219}
2220
2221
Sujithe8324352009-01-16 21:38:42 +05302222
2223void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002224{
Sujithe8324352009-01-16 21:38:42 +05302225 int i;
2226 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002227
Sujithe8324352009-01-16 21:38:42 +05302228 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002229
2230 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302231 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2232 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002233 }
2234}
2235
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236void ath_tx_edma_tasklet(struct ath_softc *sc)
2237{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002238 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002239 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2240 struct ath_hw *ah = sc->sc_ah;
2241 struct ath_txq *txq;
2242 struct ath_buf *bf, *lastbf;
2243 struct list_head bf_head;
2244 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002245
2246 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002247 if (work_pending(&sc->hw_reset_work))
2248 break;
2249
Felix Fietkaufce041b2011-05-19 12:20:25 +02002250 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 if (status == -EINPROGRESS)
2252 break;
2253 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002254 ath_dbg(common, ATH_DBG_XMIT,
2255 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 break;
2257 }
2258
2259 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002260 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261 continue;
2262
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264
2265 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002266
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002267 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2268 spin_unlock_bh(&txq->axq_lock);
2269 return;
2270 }
2271
2272 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2273 struct ath_buf, list);
2274 lastbf = bf->bf_lastbf;
2275
2276 INIT_LIST_HEAD(&bf_head);
2277 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2278 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2281 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282
Felix Fietkaufce041b2011-05-19 12:20:25 +02002283 if (!list_empty(&txq->axq_q)) {
2284 struct list_head bf_q;
2285
2286 INIT_LIST_HEAD(&bf_q);
2287 txq->axq_link = NULL;
2288 list_splice_tail_init(&txq->axq_q, &bf_q);
2289 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2290 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291 }
2292
Felix Fietkaufce041b2011-05-19 12:20:25 +02002293 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294 spin_unlock_bh(&txq->axq_lock);
2295 }
2296}
2297
Sujithe8324352009-01-16 21:38:42 +05302298/*****************/
2299/* Init, Cleanup */
2300/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002301
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002302static int ath_txstatus_setup(struct ath_softc *sc, int size)
2303{
2304 struct ath_descdma *dd = &sc->txsdma;
2305 u8 txs_len = sc->sc_ah->caps.txs_len;
2306
2307 dd->dd_desc_len = size * txs_len;
2308 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2309 &dd->dd_desc_paddr, GFP_KERNEL);
2310 if (!dd->dd_desc)
2311 return -ENOMEM;
2312
2313 return 0;
2314}
2315
2316static int ath_tx_edma_init(struct ath_softc *sc)
2317{
2318 int err;
2319
2320 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2321 if (!err)
2322 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2323 sc->txsdma.dd_desc_paddr,
2324 ATH_TXSTATUS_RING_SIZE);
2325
2326 return err;
2327}
2328
2329static void ath_tx_edma_cleanup(struct ath_softc *sc)
2330{
2331 struct ath_descdma *dd = &sc->txsdma;
2332
2333 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2334 dd->dd_desc_paddr);
2335}
2336
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337int ath_tx_init(struct ath_softc *sc, int nbufs)
2338{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002339 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340 int error = 0;
2341
Sujith797fe5cb2009-03-30 15:28:45 +05302342 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343
Sujith797fe5cb2009-03-30 15:28:45 +05302344 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002345 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302346 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002347 ath_err(common,
2348 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302349 goto err;
2350 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351
Sujith797fe5cb2009-03-30 15:28:45 +05302352 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002353 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002355 ath_err(common,
2356 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302357 goto err;
2358 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002360 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2361
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002362 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2363 error = ath_tx_edma_init(sc);
2364 if (error)
2365 goto err;
2366 }
2367
Sujith797fe5cb2009-03-30 15:28:45 +05302368err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369 if (error != 0)
2370 ath_tx_cleanup(sc);
2371
2372 return error;
2373}
2374
Sujith797fe5cb2009-03-30 15:28:45 +05302375void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376{
Sujithb77f4832008-12-07 21:44:03 +05302377 if (sc->beacon.bdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujithb77f4832008-12-07 21:44:03 +05302380 if (sc->tx.txdma.dd_desc_len != 0)
2381 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002382
2383 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2384 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385}
2386
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2388{
Sujithc5170162008-10-29 10:13:59 +05302389 struct ath_atx_tid *tid;
2390 struct ath_atx_ac *ac;
2391 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Sujith8ee5afb2008-12-07 21:43:36 +05302393 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302394 tidno < WME_NUM_TID;
2395 tidno++, tid++) {
2396 tid->an = an;
2397 tid->tidno = tidno;
2398 tid->seq_start = tid->seq_next = 0;
2399 tid->baw_size = WME_MAX_BA;
2400 tid->baw_head = tid->baw_tail = 0;
2401 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302402 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302403 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002404 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302405 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302406 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302407 tid->state &= ~AGGR_ADDBA_COMPLETE;
2408 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302409 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410
Sujith8ee5afb2008-12-07 21:43:36 +05302411 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302412 acno < WME_NUM_AC; acno++, ac++) {
2413 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002414 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302415 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002416 }
2417}
2418
Sujithb5aa9bf2008-10-29 10:13:31 +05302419void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420{
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 struct ath_atx_ac *ac;
2422 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002424 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302425
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 for (tidno = 0, tid = &an->tid[tidno];
2427 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002430 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431
Felix Fietkau2b409942010-07-07 19:42:08 +02002432 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433
Felix Fietkau2b409942010-07-07 19:42:08 +02002434 if (tid->sched) {
2435 list_del(&tid->list);
2436 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002438
2439 if (ac->sched) {
2440 list_del(&ac->list);
2441 tid->ac->sched = false;
2442 }
2443
2444 ath_tid_drain(sc, txq, tid);
2445 tid->state &= ~AGGR_ADDBA_COMPLETE;
2446 tid->state &= ~AGGR_CLEANUP;
2447
2448 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002449 }
2450}