/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE               8
#define OFDM_PLCP_BITS              22
#define HT_RC_2_STREAMS(_rc)        ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                       8
#define L_LTF                       8
#define L_SIG                       4
#define HT_SIG                      8
#define HT_STF                      4
#define HT_LTF(_ns)                 (4 * (_ns))
#define SYMBOL_TIME(_ns)            ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)     (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

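/*
 * Illustrative arithmetic for the symbol-time helpers above (added note, not
 * from the original source): the "_ns" argument is a symbol count. With a
 * full guard interval each OFDM symbol lasts 4 us, so SYMBOL_TIME(47) = 188 us.
 * With a short (half) guard interval a symbol lasts 3.6 us and the macro
 * rounds up: SYMBOL_TIME_HALFGI(3) = (3 * 18 + 4) / 5 = 11 us. The inverse
 * helpers convert a duration back to symbols, rounding down conservatively,
 * e.g. NUM_SYMBOLS_PER_USEC_HALFGI(36) = (36 * 5 - 4) / 18 = 9.
 */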

static u16 bits_per_symbol[][2] = {
        /*             20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
};

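/*
 * Illustrative note (added, not from the original source): bits_per_symbol[]
 * holds the data bits carried by one 4 us symbol for a single spatial stream.
 * ath_pkt_duration() below scales the entry by the stream count, so MCS 15
 * (two streams of the MCS 7 modulation) at 40 MHz contributes
 * 540 * 2 = 1080 bits per symbol when computing frame airtime.
 */
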
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
                6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
                9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
                12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
                7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
                10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
                14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
                13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
                20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
                26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
                14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
                22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
                29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
        }
};

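/*
 * Illustrative note (added, not from the original source): each row gives,
 * per MCS index, the largest frame that still fits a 4 ms transmit duration
 * at that rate. For MCS 7 at HT20 with a long GI (65 Mbit/s) that is roughly
 * 65e6 * 0.004 / 8 = 32500 bytes minus preamble overhead, hence 32172 above;
 * entries are capped at 65532, just under the 16-bit hardware length limit.
 */
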
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));
        spin_lock_bh(&txq->axq_lock);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                spin_unlock_bh(&txq->axq_lock);
                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
                spin_lock_bh(&txq->axq_lock);
        }

        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
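
/*
 * Worked example for the block-ack window bookkeeping above (illustrative
 * only, added note): with tid->seq_start = 100 and an incoming seqno = 103,
 * ATH_BA_INDEX() yields an offset of 3 into the window; with
 * tid->baw_head = 5 the slot used in tid->tx_buf is (5 + 3) masked by
 * ATH_TID_MAX_BUFS - 1, i.e. slot 8. ath_tx_update_baw() clears that slot
 * and then slides seq_start/baw_head forward past any already-completed
 * leading slots.
 */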

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)

{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        spin_unlock(&txq->axq_lock);
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        spin_lock(&txq->axq_lock);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        if (fi->retries++ > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool clear_filter;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable the reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if ((tid->state & AGGR_CLEANUP) || !retry) {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                                if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
                                    !an->sleeping)
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);

                                clear_filter = true;
                                txpending = 1;
                        } else {
                                txfail = 1;
                                sendbar = 1;
                                txfail_cnt++;
                        }
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid, seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                ath_tx_rc_status(sc, bf, ts, nframes,
                                                                 nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 1);
                                                break;
                                        }

                                        fi->bf = tbf;
                                }
                        }

                        /*
                         * Put this buffer on the temporary pending
                         * queue to retain ordering.
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_tim(sta);

                spin_lock_bh(&txq->axq_lock);
                if (clear_filter)
                        tid->ac->clear_ps_filter = true;
                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping)
                        ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                ath_tx_flush_tid(sc, tid);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;
                }
        }

        rcu_read_unlock();

        if (needreset)
                ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * The h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}
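
/*
 * Illustrative example (added note, not from the original source): if the
 * slowest rate in the series is MCS 7 at HT20 with a long GI,
 * max_4ms_framelen becomes 32172 bytes. Without Bluetooth coexistence
 * pressure the aggregate limit is min(32172, ATH_AMPDU_LIMIT_MAX); with
 * SC_OP_BT_PRIORITY_DETECTED set it is scaled to 3/8 of that, i.e. 12064
 * bytes, before being clamped by the peer's advertised maximum A-MPDU size.
 */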

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         * The hardware can keep up at lower rates, but not higher rates.
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add delimiters when using RTS/CTS with aggregation
         * on a non-enterprise AR9003 card.
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
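
/*
 * Worked example for the density math above (illustrative only, added note,
 * assuming ATH_AGGR_DELIM_SZ is the 4-byte size of one MPDU delimiter): with
 * an 8 us MPDU density, MCS 7 at HT20 and full GI, nsymbols =
 * NUM_SYMBOLS_PER_USEC(8) = 2 and nsymbits = 260 * 1, so minlen =
 * 2 * 260 / 8 = 65 bytes. A 40-byte subframe would then need
 * mindelim = (65 - 40) / 4 = 6 extra delimiters if that exceeds the
 * length-based default.
 */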

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);

                if (!bf)
                        continue;

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;
                if (!bf_first)
                        bf_first = bf;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
                                !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use 3.6 us instead of 4 us for the symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up the duration for the legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
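
/*
 * Worked example (illustrative only, added note): a 1500-byte MPDU at MCS 7
 * (single stream), 20 MHz, long GI gives nbits = 1500 * 8 + 22 = 12022,
 * nsymbits = 260, nsymbols = ceil(12022 / 260) = 47, so the data portion
 * lasts SYMBOL_TIME(47) = 188 us; adding the legacy and HT preamble fields
 * (8 + 8 + 4 + 8 + 4 + 4 us for one stream) yields 224 us of airtime.
 */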

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        info->rtscts_rate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                info->rtscts_rate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                        is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered = false;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);

                if (!skb_queue_empty(&tid->buf_q))
                        buffered = true;

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                spin_unlock_bh(&txq->axq_lock);
        }

        return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                spin_unlock_bh(&txq->axq_lock);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

Sujithe8324352009-01-16 21:38:42 +05301237/********************/
1238/* Queue Management */
1239/********************/
1240
Sujithe8324352009-01-16 21:38:42 +05301241static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1242 struct ath_txq *txq)
1243{
1244 struct ath_atx_ac *ac, *ac_tmp;
1245 struct ath_atx_tid *tid, *tid_tmp;
1246
1247 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1248 list_del(&ac->list);
1249 ac->sched = false;
1250 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1251 list_del(&tid->list);
1252 tid->sched = false;
1253 ath_tid_drain(sc, txq, tid);
1254 }
1255 }
1256}
1257
1258struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1259{
Sujithcbe61d82009-02-09 13:27:12 +05301260 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001261 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301262 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001263 static const int subtype_txq_to_hwq[] = {
1264 [WME_AC_BE] = ATH_TXQ_AC_BE,
1265 [WME_AC_BK] = ATH_TXQ_AC_BK,
1266 [WME_AC_VI] = ATH_TXQ_AC_VI,
1267 [WME_AC_VO] = ATH_TXQ_AC_VO,
1268 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001269 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301270
1271 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001272 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301273 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1274 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1275 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1276 qi.tqi_physCompBuf = 0;
1277
1278 /*
1279 * Enable interrupts only for EOL and DESC conditions.
1280 * We mark tx descriptors to receive a DESC interrupt
1281 * when a tx queue gets deep; otherwise we wait for the
1282 * EOL to reap descriptors. Note that this is done to
1283 * reduce interrupt load and this only defers reaping
1284 * descriptors, never transmitting frames. Aside from
1285 * reducing interrupts this also permits more concurrency.
1286 * The only potential downside is if the tx queue backs
1287 * up in which case the top half of the kernel may back up
1288 * due to a lack of tx descriptors.
1289 *
1290 * The UAPSD queue is an exception, since we take a desc-
1291 * based intr on the EOSP frames.
1292 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001293 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1294 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1295 TXQ_FLAG_TXERRINT_ENABLE;
1296 } else {
1297 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1298 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1299 else
1300 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1301 TXQ_FLAG_TXDESCINT_ENABLE;
1302 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001303 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1304 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301305 /*
1306 * NB: don't print a message, this happens
1307 * normally on parts with too few tx queues
1308 */
1309 return NULL;
1310 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001311 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001312 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001313 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1314 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301315 return NULL;
1316 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001317 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1318 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301319
Ben Greear60f2d1d2011-01-09 23:11:52 -08001320 txq->axq_qnum = axq_qnum;
1321 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301322 txq->axq_link = NULL;
1323 INIT_LIST_HEAD(&txq->axq_q);
1324 INIT_LIST_HEAD(&txq->axq_acq);
1325 spin_lock_init(&txq->axq_lock);
1326 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001327 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001328 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001329 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001330
1331 txq->txq_headidx = txq->txq_tailidx = 0;
1332 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1333 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301334 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001335 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301336}
1337
Sujithe8324352009-01-16 21:38:42 +05301338int ath_txq_update(struct ath_softc *sc, int qnum,
1339 struct ath9k_tx_queue_info *qinfo)
1340{
Sujithcbe61d82009-02-09 13:27:12 +05301341 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301342 int error = 0;
1343 struct ath9k_tx_queue_info qi;
1344
1345 if (qnum == sc->beacon.beaconq) {
1346 /*
1347 * XXX: for beacon queue, we just save the parameter.
1348 * It will be picked up by ath_beaconq_config when
1349 * it's necessary.
1350 */
1351 sc->beacon.beacon_qi = *qinfo;
1352 return 0;
1353 }
1354
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001355 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301356
1357 ath9k_hw_get_txq_props(ah, qnum, &qi);
1358 qi.tqi_aifs = qinfo->tqi_aifs;
1359 qi.tqi_cwmin = qinfo->tqi_cwmin;
1360 qi.tqi_cwmax = qinfo->tqi_cwmax;
1361 qi.tqi_burstTime = qinfo->tqi_burstTime;
1362 qi.tqi_readyTime = qinfo->tqi_readyTime;
1363
1364 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001365 ath_err(ath9k_hw_common(sc->sc_ah),
1366 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301367 error = -EIO;
1368 } else {
1369 ath9k_hw_resettxqueue(ah, qnum);
1370 }
1371
1372 return error;
1373}
1374
1375int ath_cabq_update(struct ath_softc *sc)
1376{
1377 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001378 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301379 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301380
1381 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1382 /*
1383 * Ensure the readytime % is within the bounds.
1384 */
Sujith17d79042009-02-09 13:27:03 +05301385 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1386 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1387 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1388 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301389
Steve Brown9814f6b2011-02-07 17:10:39 -07001390 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301391 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301392 ath_txq_update(sc, qnum, &qi);
1393
1394 return 0;
1395}
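/*
 * Worked example for the ready-time computation above (illustrative
 * values): with a beacon interval of 100 and cabqReadytime clamped to
 * 10 percent, qi.tqi_readyTime = (100 * 10) / 100 = 10, i.e. a tenth
 * of the beacon period is reserved for CAB traffic.
 */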
1396
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001397static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1398{
1399 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1400 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1401}
1402
Felix Fietkaufce041b2011-05-19 12:20:25 +02001403static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1404 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301405 __releases(txq->axq_lock)
1406 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301407{
1408 struct ath_buf *bf, *lastbf;
1409 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001410 struct ath_tx_status ts;
1411
1412 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301413 INIT_LIST_HEAD(&bf_head);
1414
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 while (!list_empty(list)) {
1416 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301417
Felix Fietkaufce041b2011-05-19 12:20:25 +02001418 if (bf->bf_stale) {
1419 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301420
Felix Fietkaufce041b2011-05-19 12:20:25 +02001421 ath_tx_return_buffer(sc, bf);
1422 continue;
Sujithe8324352009-01-16 21:38:42 +05301423 }
1424
1425 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001426 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001427
Sujithe8324352009-01-16 21:38:42 +05301428 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001429 if (bf_is_ampdu_not_probing(bf))
1430 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301431
Felix Fietkaufce041b2011-05-19 12:20:25 +02001432 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301433 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001434 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1435 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301436 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001437 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001438 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001439 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001440}
1441
1442/*
1443 * Drain a given TX queue (could be Beacon or Data)
1444 *
1445 * This assumes output has been stopped and
1446 * we do not need to block ath_tx_tasklet.
1447 */
1448void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1449{
1450 spin_lock_bh(&txq->axq_lock);
1451 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1452 int idx = txq->txq_tailidx;
1453
1454 while (!list_empty(&txq->txq_fifo[idx])) {
1455 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1456 retry_tx);
1457
1458 INCR(idx, ATH_TXFIFO_DEPTH);
1459 }
1460 txq->txq_tailidx = idx;
1461 }
1462
1463 txq->axq_link = NULL;
1464 txq->axq_tx_inprogress = false;
1465 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001466
1467 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001468 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1469 ath_txq_drain_pending_buffers(sc, txq);
1470
1471 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301472}
1473
Felix Fietkau080e1a22010-12-05 20:17:53 +01001474bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301475{
Sujithcbe61d82009-02-09 13:27:12 +05301476 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001477 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301478 struct ath_txq *txq;
1479 int i, npend = 0;
1480
1481 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001482 return true;
Sujith043a0402009-01-16 21:38:47 +05301483
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001484 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301485
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001486 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301487 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001488 if (!ATH_TXQ_SETUP(sc, i))
1489 continue;
1490
1491 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301492 }
1493
Felix Fietkau080e1a22010-12-05 20:17:53 +01001494 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001495 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301496
1497 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001498 if (!ATH_TXQ_SETUP(sc, i))
1499 continue;
1500
1501 /*
1502 * The caller will resume queues with ieee80211_wake_queues.
1503 * Mark the queue as not stopped to prevent ath_tx_complete
1504 * from waking the queue too early.
1505 */
1506 txq = &sc->tx.txq[i];
1507 txq->stopped = false;
1508 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301509 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001510
1511 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301512}
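/*
 * Sketch of the expected caller pattern around a chip reset (hedged;
 * the reset path itself is outside this file):
 *
 *	ath9k_ps_wakeup(sc);
 *	ieee80211_stop_queues(sc->hw);
 *	if (!ath_drain_all_txq(sc, false))
 *		ath_err(common, "Unable to stop TX DMA cleanly\n");
 *	// ... reset and reprogram the hardware ...
 *	ieee80211_wake_queues(sc->hw);
 *	ath9k_ps_restore(sc);
 */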
1513
Sujithe8324352009-01-16 21:38:42 +05301514void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1515{
1516 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1517 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1518}
1519
Ben Greear7755bad2011-01-18 17:30:00 -08001520/* For each axq_acq entry, for each tid, try to schedule packets
1521 * for transmit until ampdu_depth has reached min Q depth.
1522 */
Sujithe8324352009-01-16 21:38:42 +05301523void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1524{
Ben Greear7755bad2011-01-18 17:30:00 -08001525 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1526 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301527
Felix Fietkau236de512011-09-03 01:40:25 +02001528 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001529 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301530 return;
1531
1532 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001533 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301534
Ben Greear7755bad2011-01-18 17:30:00 -08001535 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1536 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1537 list_del(&ac->list);
1538 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301539
Ben Greear7755bad2011-01-18 17:30:00 -08001540 while (!list_empty(&ac->tid_q)) {
1541 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1542 list);
1543 list_del(&tid->list);
1544 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301545
Ben Greear7755bad2011-01-18 17:30:00 -08001546 if (tid->paused)
1547 continue;
Sujithe8324352009-01-16 21:38:42 +05301548
Ben Greear7755bad2011-01-18 17:30:00 -08001549 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301550
Ben Greear7755bad2011-01-18 17:30:00 -08001551 /*
1552 * add tid to round-robin queue if more frames
1553 * are pending for the tid
1554 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001555 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001556 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301557
Ben Greear7755bad2011-01-18 17:30:00 -08001558 if (tid == last_tid ||
1559 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1560 break;
Sujithe8324352009-01-16 21:38:42 +05301561 }
Ben Greear7755bad2011-01-18 17:30:00 -08001562
1563 if (!list_empty(&ac->tid_q)) {
1564 if (!ac->sched) {
1565 ac->sched = true;
1566 list_add_tail(&ac->list, &txq->axq_acq);
1567 }
1568 }
1569
1570 if (ac == last_ac ||
1571 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1572 return;
Sujithe8324352009-01-16 21:38:42 +05301573 }
1574}
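/*
 * The loop above is a two-level round robin: access categories are
 * taken from axq_acq in order, and within each AC the TIDs on tid_q
 * each get one ath_tx_sched_aggr() pass.  Scheduling stops early once
 * the hardware queue holds ATH_AGGR_MIN_QDEPTH aggregates, and any
 * AC/TID with frames left over is re-queued for the next round.
 */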
1575
Sujithe8324352009-01-16 21:38:42 +05301576/***********/
1577/* TX, DMA */
1578/***********/
1579
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581 * Insert a chain of ath_buf (descriptors) on a txq and
1582 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583 */
Sujith102e0572008-10-29 10:15:16 +05301584static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001585 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586{
Sujithcbe61d82009-02-09 13:27:12 +05301587 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001588 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001589 struct ath_buf *bf, *bf_last;
1590 bool puttxbuf = false;
1591 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301592
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001593 /*
1594 * Insert the frame on the outbound list and
1595 * pass it on to the hardware.
1596 */
1597
1598 if (list_empty(head))
1599 return;
1600
Felix Fietkaufce041b2011-05-19 12:20:25 +02001601 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001603 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604
Joe Perches226afe62010-12-02 19:12:37 -08001605 ath_dbg(common, ATH_DBG_QUEUE,
1606 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607
Felix Fietkaufce041b2011-05-19 12:20:25 +02001608 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1609 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001610 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001611 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001612 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001613 list_splice_tail_init(head, &txq->axq_q);
1614
Felix Fietkaufce041b2011-05-19 12:20:25 +02001615 if (txq->axq_link) {
1616 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001617 ath_dbg(common, ATH_DBG_XMIT,
1618 "link[%u] (%p)=%llx (%p)\n",
1619 txq->axq_qnum, txq->axq_link,
1620 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001621 } else if (!edma)
1622 puttxbuf = true;
1623
1624 txq->axq_link = bf_last->bf_desc;
1625 }
1626
1627 if (puttxbuf) {
1628 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1629 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1630 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1631 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1632 }
1633
1634 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001635 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001636 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001638
1639 if (!internal) {
1640 txq->axq_depth++;
1641 if (bf_is_ampdu_not_probing(bf))
1642 txq->axq_ampdu_depth++;
1643 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001644}
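/*
 * Queueing summary: on EDMA-capable hardware the chain goes straight
 * into the head TX FIFO slot (and TXDP is written) when that slot is
 * free, otherwise it is parked on axq_q until the completion tasklet
 * can push it.  On non-EDMA hardware the chain is appended to axq_q,
 * linked to the previous descriptor via axq_link (or written to TXDP
 * if the queue was idle) and the queue is kicked with
 * ath9k_hw_txstart().
 */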
1645
Sujithe8324352009-01-16 21:38:42 +05301646static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001647 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301648{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001649 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001650 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001651 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301652
1653 /*
1654 * Do not queue to h/w when any of the following conditions is true:
1655 * - there are pending frames in software queue
1656 * - the TID is currently paused for ADDBA/BAR request
1657 * - seqno is not within block-ack window
1658 * - h/w queue depth exceeds low water mark
1659 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001660 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001661 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001662 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001663 /*
Sujithe8324352009-01-16 21:38:42 +05301664 * Add this frame to the software queue for scheduling later
1665 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001666 */
Ben Greearbda8add2011-01-09 23:11:48 -08001667 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001668 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001669 if (!txctl->an || !txctl->an->sleeping)
1670 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301671 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001672 }
1673
Felix Fietkau44f1d262011-08-28 00:32:25 +02001674 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1675 if (!bf)
1676 return;
1677
Felix Fietkau399c6482011-09-14 21:24:17 +02001678 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001679 INIT_LIST_HEAD(&bf_head);
1680 list_add(&bf->list, &bf_head);
1681
Sujithe8324352009-01-16 21:38:42 +05301682 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001683 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301684
1685 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001686 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301687 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001688 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001689 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301690}
1691
Felix Fietkau82b873a2010-11-11 03:18:37 +01001692static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001693 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001694{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001695 struct ath_frame_info *fi = get_frame_info(skb);
1696 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301697 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001698
Felix Fietkau44f1d262011-08-28 00:32:25 +02001699 bf = fi->bf;
1700 if (!bf)
1701 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1702
1703 if (!bf)
1704 return;
1705
1706 INIT_LIST_HEAD(&bf_head);
1707 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001708 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301709
1710 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001711 if (tid)
1712 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301713
Sujithd43f30152009-01-16 21:38:53 +05301714 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001715 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001716 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301717 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001718}
1719
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001720static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1721 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301722{
1723 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 struct ieee80211_sta *sta = tx_info->control.sta;
1725 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001726 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001727 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001728 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001729 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301730
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001731 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301732
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001733 if (sta)
1734 an = (struct ath_node *) sta->drv_priv;
1735
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001736 memset(fi, 0, sizeof(*fi));
1737 if (hw_key)
1738 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001739 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1740 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001741 else
1742 fi->keyix = ATH9K_TXKEYIX_INVALID;
1743 fi->keytype = keytype;
1744 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301745}
1746
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301747u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1748{
1749 struct ath_hw *ah = sc->sc_ah;
1750 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301751 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1752 (curchan->channelFlags & CHANNEL_5GHZ) &&
1753 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301754 return 0x3;
1755 else
1756 return chainmask;
1757}
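/*
 * Example of the reduction above: with the APM capability set, a 5 GHz
 * channel, all three chains enabled (chainmask 0x7) and a rate code
 * below 0x90, transmission is limited to two chains (0x3); any other
 * combination leaves the caller's chainmask untouched.
 */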
1758
Felix Fietkau44f1d262011-08-28 00:32:25 +02001759/*
1760 * Assign a descriptor (and a sequence number if necessary),
1761 * and map the buffer for DMA. Frees the skb on error.
1762 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001763static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001764 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001765 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001766 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301767{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001768 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001769 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001770 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001771 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001773
1774 bf = ath_tx_get_buffer(sc);
1775 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001776 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001777 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001778 }
Sujithe8324352009-01-16 21:38:42 +05301779
Sujithe8324352009-01-16 21:38:42 +05301780 ATH_TXBUF_RESET(bf);
1781
Felix Fietkaufa05f872011-08-28 00:32:24 +02001782 if (tid) {
1783 seqno = tid->seq_next;
1784 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1785 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1786 bf->bf_state.seqno = seqno;
1787 }
1788
Sujithe8324352009-01-16 21:38:42 +05301789 bf->bf_mpdu = skb;
1790
Ben Greearc1739eb32010-10-14 12:45:29 -07001791 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1792 skb->len, DMA_TO_DEVICE);
1793 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301794 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001795 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001796 ath_err(ath9k_hw_common(sc->sc_ah),
1797 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001798 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001799 goto error;
Sujithe8324352009-01-16 21:38:42 +05301800 }
1801
Felix Fietkau56dc6332011-08-28 00:32:22 +02001802 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001803
1804 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001805
1806error:
1807 dev_kfree_skb_any(skb);
1808 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001809}
1810
1811/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001812static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001813 struct ath_tx_control *txctl)
1814{
Felix Fietkau04caf862010-11-14 15:20:12 +01001815 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001817 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001818 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001819 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301820
Sujithe8324352009-01-16 21:38:42 +05301821 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301822 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1823 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001824 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1825 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001826 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001827
Felix Fietkau066dae92010-11-07 14:59:39 +01001828 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001829 }
1830
1831 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001832 /*
1833 * Try aggregation if it's a unicast data frame
1834 * and the destination is HT capable.
1835 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001836 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301837 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001838 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1839 if (!bf)
1840 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001841
Felix Fietkau82b873a2010-11-11 03:18:37 +01001842 bf->bf_state.bfs_paprd = txctl->paprd;
1843
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301844 if (txctl->paprd)
1845 bf->bf_state.bfs_paprd_timestamp = jiffies;
1846
Felix Fietkau44f1d262011-08-28 00:32:25 +02001847 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301848 }
1849
Felix Fietkaufa05f872011-08-28 00:32:24 +02001850out:
Sujithe8324352009-01-16 21:38:42 +05301851 spin_unlock_bh(&txctl->txq->axq_lock);
1852}
1853
1854/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001855int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301856 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001857{
Felix Fietkau28d16702010-11-14 15:20:10 +01001858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1859 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001860 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001861 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001862 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001863 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001864 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001865 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001866 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001867
Ben Greeara9927ba2010-12-06 21:13:49 -08001868 /* NOTE: sta can be NULL according to net/mac80211.h */
1869 if (sta)
1870 txctl->an = (struct ath_node *)sta->drv_priv;
1871
Felix Fietkau04caf862010-11-14 15:20:12 +01001872 if (info->control.hw_key)
1873 frmlen += info->control.hw_key->icv_len;
1874
Felix Fietkau28d16702010-11-14 15:20:10 +01001875 /*
1876 * As a temporary workaround, assign seq# here; this will likely need
1877 * to be cleaned up to work better with Beacon transmission and virtual
1878 * BSSes.
1879 */
1880 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1881 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1882 sc->tx.seq_no += 0x10;
1883 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1884 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1885 }
1886
1887 /* Add the padding after the header if this is not already done */
1888 padpos = ath9k_cmn_padpos(hdr->frame_control);
1889 padsize = padpos & 3;
1890 if (padsize && skb->len > padpos) {
1891 if (skb_headroom(skb) < padsize)
1892 return -ENOMEM;
1893
1894 skb_push(skb, padsize);
1895 memmove(skb->data, skb->data + padsize, padpos);
1896 }
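	/*
	 * Worked example: a 3-address QoS data frame has padpos = 26, so
	 * padsize = 2 and two padding bytes end up between the 802.11
	 * header and the payload, keeping the payload 4-byte aligned for
	 * the hardware.
	 */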
1897
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001898 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1899 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1900 !ieee80211_is_data(hdr->frame_control))
1901 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1902
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001903 setup_frame_info(hw, skb, frmlen);
1904
1905 /*
1906 * At this point, the vif, hw_key and sta pointers in the tx control
1907 * info are no longer valid (overwritten by the ath_frame_info data).
1908 */
1909
Felix Fietkau066dae92010-11-07 14:59:39 +01001910 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001911 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001912 if (txq == sc->tx.txq_map[q] &&
1913 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001914 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001915 txq->stopped = 1;
1916 }
1917 spin_unlock_bh(&txq->axq_lock);
1918
Felix Fietkau44f1d262011-08-28 00:32:25 +02001919 ath_tx_start_dma(sc, skb, txctl);
1920 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001921}
1922
Sujithe8324352009-01-16 21:38:42 +05301923/*****************/
1924/* TX Completion */
1925/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001926
Sujithe8324352009-01-16 21:38:42 +05301927static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301928 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001929{
Sujithe8324352009-01-16 21:38:42 +05301930 struct ieee80211_hw *hw = sc->hw;
1931 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001932 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001933 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001934 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301935
Joe Perches226afe62010-12-02 19:12:37 -08001936 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301937
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301938 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301939 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301940
Felix Fietkau55797b12011-09-14 21:24:16 +02001941 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301942 /* Frame was ACKed */
1943 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301944
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001945 padpos = ath9k_cmn_padpos(hdr->frame_control);
1946 padsize = padpos & 3;
1947 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301948 /*
1949 * Remove MAC header padding before giving the frame back to
1950 * mac80211.
1951 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001952 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301953 skb_pull(skb, padsize);
1954 }
1955
Sujith1b04b932010-01-08 10:36:05 +05301956 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1957 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001958 ath_dbg(common, ATH_DBG_PS,
1959 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301960 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1961 PS_WAIT_FOR_CAB |
1962 PS_WAIT_FOR_PSPOLL_DATA |
1963 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001964 }
1965
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966 q = skb_get_queue_mapping(skb);
1967 if (txq == sc->tx.txq_map[q]) {
1968 spin_lock_bh(&txq->axq_lock);
1969 if (WARN_ON(--txq->pending_frames < 0))
1970 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001971
Felix Fietkau7545daf2011-01-24 19:23:16 +01001972 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1973 ieee80211_wake_queue(sc->hw, q);
1974 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001975 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001976 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001977 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001978
1979 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301980}
1981
1982static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001983 struct ath_txq *txq, struct list_head *bf_q,
1984 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301985{
1986 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301987 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301988 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301989
Sujithe8324352009-01-16 21:38:42 +05301990 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301991 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301992
Felix Fietkau55797b12011-09-14 21:24:16 +02001993 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301994 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301995
Ben Greearc1739eb32010-10-14 12:45:29 -07001996 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001997 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001998
1999 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302000 if (time_after(jiffies,
2001 bf->bf_state.bfs_paprd_timestamp +
2002 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002003 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002004 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002005 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002006 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002007 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302008 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002009 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002010 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2011 * accidentally reference it later.
2012 */
2013 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302014
2015 /*
2016 * Return the list of ath_buf of this mpdu to free queue
2017 */
2018 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2019 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2020 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2021}
2022
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002023static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2024 struct ath_tx_status *ts, int nframes, int nbad,
2025 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302026{
Sujitha22be222009-03-30 15:28:36 +05302027 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302028 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302029 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002030 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002031 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302032 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302033
Sujith95e4acb2009-03-13 08:56:09 +05302034 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002035 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302036
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002037 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302038 WARN_ON(tx_rateindex >= hw->max_rates);
2039
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002040 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302041 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002042 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002043 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302044
Felix Fietkaub572d032010-11-14 15:20:07 +01002045 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002046
Felix Fietkaub572d032010-11-14 15:20:07 +01002047 tx_info->status.ampdu_len = nframes;
2048 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002049 }
2050
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002051 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau493cf042011-09-14 21:24:22 +02002052 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002053 /*
2054 * If an underrun error is seen, treat it as an excessive
2055 * retry only if the max frame trigger level has been reached
2056 * (2 KB for single stream, and 4 KB for dual stream).
2057 * Adjust the long retry as if the frame was tried
2058 * hw->max_rate_tries times to affect how rate control updates
2059 * PER for the failed rate.
2060 * In case of congestion on the bus penalizing this type of
2061 * underruns should help hardware actually transmit new frames
2062 * successfully by eventually preferring slower rates.
2063 * This itself should also alleviate congestion on the bus.
2064 */
2065 if (ieee80211_is_data(hdr->frame_control) &&
2066 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2067 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002068 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002069 tx_info->status.rates[tx_rateindex].count =
2070 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302071 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302072
Felix Fietkau545750d2009-11-23 22:21:01 +01002073 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302074 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002075 tx_info->status.rates[i].idx = -1;
2076 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302077
Felix Fietkau78c46532010-06-25 01:26:16 +02002078 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302079}
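/*
 * Example: for an aggregate of 8 subframes of which 2 were never
 * acknowledged, the caller passes nframes = 8 and nbad = 2, so
 * mac80211 sees ampdu_len = 8 and ampdu_ack_len = 6 and can update its
 * rate-control PER estimate accordingly.
 */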
2080
Felix Fietkaufce041b2011-05-19 12:20:25 +02002081static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2082 struct ath_tx_status *ts, struct ath_buf *bf,
2083 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302084 __releases(txq->axq_lock)
2085 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002086{
2087 int txok;
2088
2089 txq->axq_depth--;
2090 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2091 txq->axq_tx_inprogress = false;
2092 if (bf_is_ampdu_not_probing(bf))
2093 txq->axq_ampdu_depth--;
2094
2095 spin_unlock_bh(&txq->axq_lock);
2096
2097 if (!bf_isampdu(bf)) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002098 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2099 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2100 } else
2101 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2102
2103 spin_lock_bh(&txq->axq_lock);
2104
2105 if (sc->sc_flags & SC_OP_TXAGGR)
2106 ath_txq_schedule(sc, txq);
2107}
2108
Sujithc4288392008-11-18 09:09:30 +05302109static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002110{
Sujithcbe61d82009-02-09 13:27:12 +05302111 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002112 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2114 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302115 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002116 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002117 int status;
2118
Joe Perches226afe62010-12-02 19:12:37 -08002119 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2120 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2121 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122
Felix Fietkaufce041b2011-05-19 12:20:25 +02002123 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002125 if (work_pending(&sc->hw_reset_work))
2126 break;
2127
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 if (list_empty(&txq->axq_q)) {
2129 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002130 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002131 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002132 break;
2133 }
2134 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2135
2136 /*
2137 * There is a race condition that a BH gets scheduled
2138 * after sw writes TxE and before hw re-loads the last
2139 * descriptor to get the newly chained one.
2140 * Software must keep the last DONE descriptor as a
2141 * holding descriptor - software does so by marking
2142 * it with the STALE flag.
2143 */
2144 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302145 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002146 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002147 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002149
2150 bf = list_entry(bf_held->list.next, struct ath_buf,
2151 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002152 }
2153
2154 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302155 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156
Felix Fietkau29bffa92010-03-29 20:14:23 -07002157 memset(&ts, 0, sizeof(ts));
2158 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002159 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002160 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002161
Ben Greear2dac4fb2011-01-09 23:11:45 -08002162 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163
2164 /*
2165 * Remove the ath_bufs of the same transmit unit from txq,
2166 * but leave the last descriptor behind as the holding
2167 * descriptor for hw.
2168 */
Sujitha119cc42009-03-30 15:28:38 +05302169 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 if (!list_is_singular(&lastbf->list))
2172 list_cut_position(&bf_head,
2173 &txq->axq_q, lastbf->list.prev);
2174
Felix Fietkaufce041b2011-05-19 12:20:25 +02002175 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002176 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002177 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178 }
Johannes Berge6a98542008-10-21 12:40:02 +02002179
Felix Fietkaufce041b2011-05-19 12:20:25 +02002180 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002182 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183}
2184
Sujith305fe472009-07-23 15:32:29 +05302185static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002186{
2187 struct ath_softc *sc = container_of(work, struct ath_softc,
2188 tx_complete_work.work);
2189 struct ath_txq *txq;
2190 int i;
2191 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002192#ifdef CONFIG_ATH9K_DEBUGFS
2193 sc->tx_complete_poll_work_seen++;
2194#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002195
2196 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2197 if (ATH_TXQ_SETUP(sc, i)) {
2198 txq = &sc->tx.txq[i];
2199 spin_lock_bh(&txq->axq_lock);
2200 if (txq->axq_depth) {
2201 if (txq->axq_tx_inprogress) {
2202 needreset = true;
2203 spin_unlock_bh(&txq->axq_lock);
2204 break;
2205 } else {
2206 txq->axq_tx_inprogress = true;
2207 }
2208 }
2209 spin_unlock_bh(&txq->axq_lock);
2210 }
2211
2212 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002213 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2214 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002215 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002216 }
2217
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002218 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002219 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2220}
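/*
 * Watchdog summary: axq_tx_inprogress is set on the first poll that
 * finds a non-empty queue and is cleared by ath_tx_process_buffer()
 * whenever a completion is reaped.  If a later poll still sees the
 * flag set with frames pending, nothing completed for a whole
 * ATH_TX_COMPLETE_POLL_INT interval and the chip is reset via
 * hw_reset_work.
 */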
2221
2222
Sujithe8324352009-01-16 21:38:42 +05302223
2224void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002225{
Sujithe8324352009-01-16 21:38:42 +05302226 int i;
2227 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228
Sujithe8324352009-01-16 21:38:42 +05302229 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002230
2231 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302232 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2233 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002234 }
2235}
2236
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237void ath_tx_edma_tasklet(struct ath_softc *sc)
2238{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002239 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002240 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2241 struct ath_hw *ah = sc->sc_ah;
2242 struct ath_txq *txq;
2243 struct ath_buf *bf, *lastbf;
2244 struct list_head bf_head;
2245 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002246
2247 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002248 if (work_pending(&sc->hw_reset_work))
2249 break;
2250
Felix Fietkaufce041b2011-05-19 12:20:25 +02002251 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002252 if (status == -EINPROGRESS)
2253 break;
2254 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002255 ath_dbg(common, ATH_DBG_XMIT,
2256 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002257 break;
2258 }
2259
2260 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002261 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262 continue;
2263
Felix Fietkaufce041b2011-05-19 12:20:25 +02002264 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002265
2266 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002267
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002268 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2269 spin_unlock_bh(&txq->axq_lock);
2270 return;
2271 }
2272
2273 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2274 struct ath_buf, list);
2275 lastbf = bf->bf_lastbf;
2276
2277 INIT_LIST_HEAD(&bf_head);
2278 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2279 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002280
Felix Fietkaufce041b2011-05-19 12:20:25 +02002281 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2282 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002283
Felix Fietkaufce041b2011-05-19 12:20:25 +02002284 if (!list_empty(&txq->axq_q)) {
2285 struct list_head bf_q;
2286
2287 INIT_LIST_HEAD(&bf_q);
2288 txq->axq_link = NULL;
2289 list_splice_tail_init(&txq->axq_q, &bf_q);
2290 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2291 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002292 }
2293
Felix Fietkaufce041b2011-05-19 12:20:25 +02002294 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002295 spin_unlock_bh(&txq->axq_lock);
2296 }
2297}
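/*
 * Unlike ath_tx_processq(), which walks each queue's descriptor list,
 * the EDMA path reads completed entries from the TX status ring; each
 * entry carries the hardware queue id (ts.qid), so one loop services
 * every queue and refills the TX FIFO from axq_q once a FIFO slot
 * drains.
 */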
2298
Sujithe8324352009-01-16 21:38:42 +05302299/*****************/
2300/* Init, Cleanup */
2301/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002302
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002303static int ath_txstatus_setup(struct ath_softc *sc, int size)
2304{
2305 struct ath_descdma *dd = &sc->txsdma;
2306 u8 txs_len = sc->sc_ah->caps.txs_len;
2307
2308 dd->dd_desc_len = size * txs_len;
2309 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2310 &dd->dd_desc_paddr, GFP_KERNEL);
2311 if (!dd->dd_desc)
2312 return -ENOMEM;
2313
2314 return 0;
2315}
2316
2317static int ath_tx_edma_init(struct ath_softc *sc)
2318{
2319 int err;
2320
2321 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2322 if (!err)
2323 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2324 sc->txsdma.dd_desc_paddr,
2325 ATH_TXSTATUS_RING_SIZE);
2326
2327 return err;
2328}
2329
2330static void ath_tx_edma_cleanup(struct ath_softc *sc)
2331{
2332 struct ath_descdma *dd = &sc->txsdma;
2333
2334 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2335 dd->dd_desc_paddr);
2336}
2337
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002338int ath_tx_init(struct ath_softc *sc, int nbufs)
2339{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002340 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341 int error = 0;
2342
Sujith797fe5cb2009-03-30 15:28:45 +05302343 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344
Sujith797fe5cb2009-03-30 15:28:45 +05302345 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002346 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302347 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002348 ath_err(common,
2349 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302350 goto err;
2351 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Sujith797fe5cb2009-03-30 15:28:45 +05302353 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002354 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302355 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002356 ath_err(common,
2357 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302358 goto err;
2359 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002361 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2362
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002363 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2364 error = ath_tx_edma_init(sc);
2365 if (error)
2366 goto err;
2367 }
2368
Sujith797fe5cb2009-03-30 15:28:45 +05302369err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002370 if (error != 0)
2371 ath_tx_cleanup(sc);
2372
2373 return error;
2374}
2375
Sujith797fe5cb2009-03-30 15:28:45 +05302376void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377{
Sujithb77f4832008-12-07 21:44:03 +05302378 if (sc->beacon.bdma.dd_desc_len != 0)
2379 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380
Sujithb77f4832008-12-07 21:44:03 +05302381 if (sc->tx.txdma.dd_desc_len != 0)
2382 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002383
2384 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2385 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386}
2387
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2389{
Sujithc5170162008-10-29 10:13:59 +05302390 struct ath_atx_tid *tid;
2391 struct ath_atx_ac *ac;
2392 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393
Sujith8ee5afb2008-12-07 21:43:36 +05302394 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302395 tidno < WME_NUM_TID;
2396 tidno++, tid++) {
2397 tid->an = an;
2398 tid->tidno = tidno;
2399 tid->seq_start = tid->seq_next = 0;
2400 tid->baw_size = WME_MAX_BA;
2401 tid->baw_head = tid->baw_tail = 0;
2402 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302403 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302404 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002405 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302406 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302407 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302408 tid->state &= ~AGGR_ADDBA_COMPLETE;
2409 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302410 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411
Sujith8ee5afb2008-12-07 21:43:36 +05302412 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302413 acno < WME_NUM_AC; acno++, ac++) {
2414 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002415 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302416 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417 }
2418}
2419
Sujithb5aa9bf2008-10-29 10:13:31 +05302420void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002421{
Felix Fietkau2b409942010-07-07 19:42:08 +02002422 struct ath_atx_ac *ac;
2423 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002425 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302426
Felix Fietkau2b409942010-07-07 19:42:08 +02002427 for (tidno = 0, tid = &an->tid[tidno];
2428 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002429
Felix Fietkau2b409942010-07-07 19:42:08 +02002430 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002431 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432
Felix Fietkau2b409942010-07-07 19:42:08 +02002433 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002434
Felix Fietkau2b409942010-07-07 19:42:08 +02002435 if (tid->sched) {
2436 list_del(&tid->list);
2437 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002439
2440 if (ac->sched) {
2441 list_del(&ac->list);
2442 tid->ac->sched = false;
2443 }
2444
2445 ath_tid_drain(sc, txq, tid);
2446 tid->state &= ~AGGR_ADDBA_COMPLETE;
2447 tid->state &= ~AGGR_CLEANUP;
2448
2449 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002450 }
2451}