/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

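/*
 * Data bits carried per OFDM symbol for each single-stream MCS
 * (table index = MCS % 8), for 20 MHz and 40 MHz channels. Users of
 * this table (e.g. ath_pkt_duration() and ath_compute_num_delims()
 * below) multiply the value by the spatial stream count from
 * HT_RC_2_STREAMS().
 */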
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },        /*  0: BPSK */
        {    52,  108 },        /*  1: QPSK 1/2 */
        {    78,  162 },        /*  2: QPSK 3/4 */
        {   104,  216 },        /*  3: 16-QAM 1/2 */
        {   156,  324 },        /*  4: 16-QAM 3/4 */
        {   208,  432 },        /*  5: 64-QAM 2/3 */
        {   234,  486 },        /*  6: 64-QAM 3/4 */
        {   260,  540 },        /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

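/*
 * Largest frame length (in bytes) that still fits into a 4 ms transmit
 * duration for a given rate, indexed as [HT20/HT40, normal/short GI][MCS].
 * Entries are capped at 65532, just under the 16-bit aggregate length
 * limit of the hardware; ath_lookup_rate() uses this table to bound the
 * A-MPDU size.
 */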
static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
                6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
                9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
                12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
                7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
                10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
                14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
                13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
                20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
                26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
                14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
                22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
                29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
        }
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));
        spin_lock_bh(&txq->axq_lock);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                spin_unlock_bh(&txq->axq_lock);
                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
                spin_lock_bh(&txq->axq_lock);
        }

        spin_unlock_bh(&txq->axq_lock);
}

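/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap of
 * outstanding subframes, indexed relative to tid->seq_start.
 * ath_tx_addto_baw() marks a sequence number as in-flight when the
 * frame is scheduled, ath_tx_update_baw() clears it on completion and
 * then slides seq_start/baw_head forward past any completed entries at
 * the head of the window.
 */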
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        spin_unlock(&txq->axq_lock);
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        spin_lock(&txq->axq_lock);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        if (fi->retries++ > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

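/*
 * Aggregate completion: walk the subframe chain, complete the buffers
 * that were acked in the block-ack bitmap (or that exhausted their
 * software retries), and splice the remaining subframes back onto the
 * head of the tid queue for retransmission. A status reported for the
 * wrong TID invalidates the BA bitmap, see below.
 */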
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool clear_filter;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. Chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if ((tid->state & AGGR_CLEANUP) || !retry) {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                                if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
                                    !an->sleeping)
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);

                                clear_filter = true;
                                txpending = 1;
                        } else {
                                txfail = 1;
                                sendbar = 1;
                                txfail_cnt++;
                        }
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
                                rc_update = false;
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid, seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 1);
                                                break;
                                        }

                                        fi->bf = tbf;
                                }
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_tim(sta);

                spin_lock_bh(&txq->axq_lock);
                if (clear_filter)
                        tid->ac->clear_ps_filter = true;
                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping)
                        ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                ath_tx_flush_tid(sc, tid);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;
                }
        }

        rcu_read_unlock();

        if (needreset)
                ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}

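/*
 * Returns the maximum aggregate size (in bytes) for this frame's rate
 * series: the smallest 4 ms frame length across the configured rates,
 * further capped by ATH_AMPDU_LIMIT_MAX and by the peer's advertised
 * maximum A-MPDU length. Returns 0 (no aggregation) for probe rates or
 * when a legacy rate is part of the series.
 */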
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet at all.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

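/*
 * Worked example for the density calculation below, using the tables
 * and macros above: with an 8 usec MPDU density, MCS 7 at 40 MHz,
 * full GI and a single stream, nsymbols = 8 / 4 = 2 and nsymbits = 540,
 * so minlen = (2 * 540) / 8 = 135 bytes; a shorter subframe gets
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ extra delimiters.
 */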
658/*
Sujithd43f30152009-01-16 21:38:53 +0530659 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530660 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530661 */
662static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530663 struct ath_buf *bf, u16 frmlen,
664 bool first_subfrm)
Sujithe8324352009-01-16 21:38:42 +0530665{
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530666#define FIRST_DESC_NDELIMS 60
Sujithe8324352009-01-16 21:38:42 +0530667 struct sk_buff *skb = bf->bf_mpdu;
668 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530669 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530670 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100671 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200672 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100673 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530674
675 /* Select standard number of delimiters based on frame length alone */
676 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
677
678 /*
679 * If encryption enabled, hardware requires some more padding between
680 * subframes.
681 * TODO - this could be improved to be dependent on the rate.
682 * The hardware can keep up at lower rates, but not higher rates
683 */
Rajkumar Manoharan4f6760b2011-07-01 18:37:33 +0530684 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
685 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
Sujithe8324352009-01-16 21:38:42 +0530686 ndelim += ATH_AGGR_ENCRYPTDELIM;
687
688 /*
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530689 * Add delimiter when using RTS/CTS with aggregation
690 * and non enterprise AR9003 card
691 */
Felix Fietkau34597312011-08-29 18:57:54 +0200692 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
693 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530694 ndelim = max(ndelim, FIRST_DESC_NDELIMS);
695
696 /*
Sujithe8324352009-01-16 21:38:42 +0530697 * Convert desired mpdu density from microeconds to bytes based
698 * on highest rate in rate series (i.e. first rate) to determine
699 * required minimum length for subframe. Take into account
700 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530701 *
Sujithe8324352009-01-16 21:38:42 +0530702 * If there is no mpdu density restriction, no further calculation
703 * is needed.
704 */
Sujith4ef70842009-07-23 15:32:41 +0530705
706 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530707 return ndelim;
708
709 rix = tx_info->control.rates[0].idx;
710 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530711 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
712 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
713
714 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530715 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530716 else
Sujith4ef70842009-07-23 15:32:41 +0530717 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530718
719 if (nsymbols == 0)
720 nsymbols = 1;
721
Felix Fietkauc6663872010-04-19 19:57:33 +0200722 streams = HT_RC_2_STREAMS(rix);
723 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530724 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
725
Sujithe8324352009-01-16 21:38:42 +0530726 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530727 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
728 ndelim = max(mindelim, ndelim);
729 }
730
731 return ndelim;
732}
733
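/*
 * Pull frames off the tid queue and chain them into a single aggregate,
 * stopping when the block-ack window would be overstepped, the
 * rate-dependent byte limit or the subframe limit (half the negotiated
 * BAW size, at most ATH_AMPDU_SUBFRAME_DEFAULT) is reached, or the
 * queue runs empty.
 */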
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);

                if (!bf)
                        continue;

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;
                if (!bf_first)
                        bf_first = bf;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

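/*
 * Example: for a single spatial stream with full GI the duration
 * computed below is SYMBOL_TIME(nsymbols) plus a fixed
 * L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 usec of
 * preamble/training overhead.
 */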
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        info->rtscts_rate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                info->rtscts_rate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                        is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

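/*
 * Fill the hardware descriptors for a buffer chain: the common flags
 * and the rate series are set up once, then every subframe gets its own
 * ath_tx_info (packet type, link to the next descriptor, key info and,
 * for aggregates, its position and delimiter count) and is handed to
 * ath9k_hw_set_txdesc(). On EDMA hardware the frame is additionally
 * split into header and payload buffers at the 802.11 padding boundary.
 */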
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);
                struct ieee80211_hdr *hdr;
                int padpos, padsize;

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        hdr = (struct ieee80211_hdr *)skb->data;
                        padpos = ath9k_cmn_padpos(hdr->frame_control);
                        padsize = padpos & 3;

                        info.buf_addr[0] = bf->bf_buf_addr;
                        info.buf_len[0] = padpos + padsize;
                        info.buf_addr[1] = info.buf_addr[0] + padpos;
                        info.buf_len[1] = skb->len - padpos;
                } else {
                        info.buf_addr[0] = bf->bf_buf_addr;
                        info.buf_len[0] = skb->len;
                }

                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}

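/*
 * Keep forming and queueing aggregates for this tid until the hardware
 * queue holds enough A-MPDUs (ATH_AGGR_MIN_QDEPTH) or the block-ack
 * window closes. If only a single frame is left, it is sent without the
 * aggregate flag.
 */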
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

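/*
 * ADDBA session setup/teardown: ath_tx_aggr_start() records the
 * starting sequence number, resets the BAW state and pauses the tid
 * until the session is confirmed (ath_tx_aggr_resume() unpauses it);
 * ath_tx_aggr_stop() either tears the state down immediately or marks
 * the tid for cleanup once all in-flight subframes have completed.
 */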
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_flush_tid(sc, txtid);
}

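/*
 * Powersave handling: ath_tx_aggr_sleep() unschedules all of a
 * station's tids and reports whether any of them still hold buffered
 * frames; ath_tx_aggr_wakeup() reschedules them and sets
 * ac->clear_ps_filter so the next transmitted frame clears the
 * hardware PS filter (CLRDMASK).
 */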
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered = false;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);

                if (!skb_queue_empty(&tid->buf_q))
                        buffered = true;

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                spin_unlock_bh(&txq->axq_lock);
        }

        return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                spin_unlock_bh(&txq->axq_lock);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

/********************/
/* Queue Management */
/********************/

Sujithe8324352009-01-16 21:38:42 +05301249static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1250 struct ath_txq *txq)
1251{
1252 struct ath_atx_ac *ac, *ac_tmp;
1253 struct ath_atx_tid *tid, *tid_tmp;
1254
1255 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1256 list_del(&ac->list);
1257 ac->sched = false;
1258 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1259 list_del(&tid->list);
1260 tid->sched = false;
1261 ath_tid_drain(sc, txq, tid);
1262 }
1263 }
1264}
1265
1266struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1267{
Sujithcbe61d82009-02-09 13:27:12 +05301268 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001269 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301270 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001271 static const int subtype_txq_to_hwq[] = {
1272 [WME_AC_BE] = ATH_TXQ_AC_BE,
1273 [WME_AC_BK] = ATH_TXQ_AC_BK,
1274 [WME_AC_VI] = ATH_TXQ_AC_VI,
1275 [WME_AC_VO] = ATH_TXQ_AC_VO,
1276 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001277 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301278
1279 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001280 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301281 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1282 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1283 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1284 qi.tqi_physCompBuf = 0;
1285
1286 /*
1287 * Enable interrupts only for EOL and DESC conditions.
1288 * We mark tx descriptors to receive a DESC interrupt
1289 * when a tx queue gets deep; otherwise we wait for the
1290 * EOL to reap descriptors. Note that this is done to
1291 * reduce interrupt load, and it only defers reaping
1292 * descriptors, never transmitting frames. Aside from
1293 * reducing interrupts this also permits more concurrency.
1294 * The only potential downside is if the tx queue backs
1295 * up, in which case the top half of the kernel may back up
1296 * due to a lack of tx descriptors.
1297 *
1298 * The UAPSD queue is an exception, since we take a desc-
1299 * based intr on the EOSP frames.
1300 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001301 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1302 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1303 TXQ_FLAG_TXERRINT_ENABLE;
1304 } else {
1305 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1306 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1307 else
1308 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1309 TXQ_FLAG_TXDESCINT_ENABLE;
1310 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001311 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1312 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301313 /*
1314 * NB: don't print a message, this happens
1315 * normally on parts with too few tx queues
1316 */
1317 return NULL;
1318 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001319 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001320 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001321 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1322 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301323 return NULL;
1324 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001325 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1326 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301327
Ben Greear60f2d1d2011-01-09 23:11:52 -08001328 txq->axq_qnum = axq_qnum;
1329 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301330 txq->axq_link = NULL;
1331 INIT_LIST_HEAD(&txq->axq_q);
1332 INIT_LIST_HEAD(&txq->axq_acq);
1333 spin_lock_init(&txq->axq_lock);
1334 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001335 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001336 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001337 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001338
1339 txq->txq_headidx = txq->txq_tailidx = 0;
1340 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1341 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301342 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001343 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301344}
1345
Sujithe8324352009-01-16 21:38:42 +05301346int ath_txq_update(struct ath_softc *sc, int qnum,
1347 struct ath9k_tx_queue_info *qinfo)
1348{
Sujithcbe61d82009-02-09 13:27:12 +05301349 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301350 int error = 0;
1351 struct ath9k_tx_queue_info qi;
1352
1353 if (qnum == sc->beacon.beaconq) {
1354 /*
1355 * XXX: for beacon queue, we just save the parameter.
1356 * It will be picked up by ath_beaconq_config when
1357 * it's necessary.
1358 */
1359 sc->beacon.beacon_qi = *qinfo;
1360 return 0;
1361 }
1362
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001363 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301364
1365 ath9k_hw_get_txq_props(ah, qnum, &qi);
1366 qi.tqi_aifs = qinfo->tqi_aifs;
1367 qi.tqi_cwmin = qinfo->tqi_cwmin;
1368 qi.tqi_cwmax = qinfo->tqi_cwmax;
1369 qi.tqi_burstTime = qinfo->tqi_burstTime;
1370 qi.tqi_readyTime = qinfo->tqi_readyTime;
1371
1372 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001373 ath_err(ath9k_hw_common(sc->sc_ah),
1374 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301375 error = -EIO;
1376 } else {
1377 ath9k_hw_resettxqueue(ah, qnum);
1378 }
1379
1380 return error;
1381}
1382
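/*
 * Reconfigure the CAB (content-after-beacon) queue so that its ready
 * time is the configured percentage of the beacon interval.
 */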
1383int ath_cabq_update(struct ath_softc *sc)
1384{
1385 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001386 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301387 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301388
1389 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1390 /*
1391 * Ensure the readytime % is within the bounds.
1392 */
Sujith17d79042009-02-09 13:27:03 +05301393 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1394 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1395 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1396 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301397
Steve Brown9814f6b2011-02-07 17:10:39 -07001398 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301399 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301400 ath_txq_update(sc, qnum, &qi);
1401
1402 return 0;
1403}
1404
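/*
 * Identify A-MPDU buffers that are not rate-control probes; only these
 * are counted in txq->axq_ampdu_depth.
 */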
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001405static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1406{
1407 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1408 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1409}
1410
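/*
 * Complete or recycle every buffer on the given descriptor list. Stale
 * holding buffers are simply returned to the free list; everything else
 * goes through the normal (aggregate or single-frame) completion path,
 * with axq_lock dropped around those calls.
 */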
Felix Fietkaufce041b2011-05-19 12:20:25 +02001411static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1412 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301413 __releases(txq->axq_lock)
1414 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301415{
1416 struct ath_buf *bf, *lastbf;
1417 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001418 struct ath_tx_status ts;
1419
1420 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301421 INIT_LIST_HEAD(&bf_head);
1422
Felix Fietkaufce041b2011-05-19 12:20:25 +02001423 while (!list_empty(list)) {
1424 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301425
Felix Fietkaufce041b2011-05-19 12:20:25 +02001426 if (bf->bf_stale) {
1427 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301428
Felix Fietkaufce041b2011-05-19 12:20:25 +02001429 ath_tx_return_buffer(sc, bf);
1430 continue;
Sujithe8324352009-01-16 21:38:42 +05301431 }
1432
1433 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001434 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001435
Sujithe8324352009-01-16 21:38:42 +05301436 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001437 if (bf_is_ampdu_not_probing(bf))
1438 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301439
Felix Fietkaufce041b2011-05-19 12:20:25 +02001440 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301441 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001442 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1443 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301444 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001445 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001446 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001447 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001448}
1449
1450/*
1451 * Drain a given TX queue (could be Beacon or Data)
1452 *
1453 * This assumes output has been stopped and
1454 * we do not need to block ath_tx_tasklet.
1455 */
1456void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1457{
1458 spin_lock_bh(&txq->axq_lock);
1459 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1460 int idx = txq->txq_tailidx;
1461
1462 while (!list_empty(&txq->txq_fifo[idx])) {
1463 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1464 retry_tx);
1465
1466 INCR(idx, ATH_TXFIFO_DEPTH);
1467 }
1468 txq->txq_tailidx = idx;
1469 }
1470
1471 txq->axq_link = NULL;
1472 txq->axq_tx_inprogress = false;
1473 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001474
1475 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001476 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1477 ath_txq_drain_pending_buffers(sc, txq);
1478
1479 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301480}
1481
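/*
 * Abort tx DMA, then drain the software state of every initialized
 * queue. Returns true only if the hardware reported no pending frames
 * after the abort.
 */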
Felix Fietkau080e1a22010-12-05 20:17:53 +01001482bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301483{
Sujithcbe61d82009-02-09 13:27:12 +05301484 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001485 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301486 struct ath_txq *txq;
1487 int i, npend = 0;
1488
1489 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001490 return true;
Sujith043a0402009-01-16 21:38:47 +05301491
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001492 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301493
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001494 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301495 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001496 if (!ATH_TXQ_SETUP(sc, i))
1497 continue;
1498
1499 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301500 }
1501
Felix Fietkau080e1a22010-12-05 20:17:53 +01001502 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001503 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301504
1505 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001506 if (!ATH_TXQ_SETUP(sc, i))
1507 continue;
1508
1509 /*
1510 * The caller will resume queues with ieee80211_wake_queues.
1511 * Mark the queue as not stopped to prevent ath_tx_complete
1512 * from waking the queue too early.
1513 */
1514 txq = &sc->tx.txq[i];
1515 txq->stopped = false;
1516 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301517 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001518
1519 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301520}
1521
Sujithe8324352009-01-16 21:38:42 +05301522void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1523{
1524 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1525 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1526}
1527
Ben Greear7755bad2011-01-18 17:30:00 -08001528/* For each axq_acq entry, for each tid, try to schedule packets
1529 * for transmit until ampdu_depth has reached min Q depth.
1530 */
Sujithe8324352009-01-16 21:38:42 +05301531void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1532{
Ben Greear7755bad2011-01-18 17:30:00 -08001533 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1534 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301535
Felix Fietkau236de512011-09-03 01:40:25 +02001536 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001537 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301538 return;
1539
1540 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001541 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301542
Ben Greear7755bad2011-01-18 17:30:00 -08001543 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1544 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1545 list_del(&ac->list);
1546 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301547
Ben Greear7755bad2011-01-18 17:30:00 -08001548 while (!list_empty(&ac->tid_q)) {
1549 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1550 list);
1551 list_del(&tid->list);
1552 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301553
Ben Greear7755bad2011-01-18 17:30:00 -08001554 if (tid->paused)
1555 continue;
Sujithe8324352009-01-16 21:38:42 +05301556
Ben Greear7755bad2011-01-18 17:30:00 -08001557 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301558
Ben Greear7755bad2011-01-18 17:30:00 -08001559 /*
1560 * add tid to round-robin queue if more frames
1561 * are pending for the tid
1562 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001563 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001564 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301565
Ben Greear7755bad2011-01-18 17:30:00 -08001566 if (tid == last_tid ||
1567 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1568 break;
Sujithe8324352009-01-16 21:38:42 +05301569 }
Ben Greear7755bad2011-01-18 17:30:00 -08001570
1571 if (!list_empty(&ac->tid_q)) {
1572 if (!ac->sched) {
1573 ac->sched = true;
1574 list_add_tail(&ac->list, &txq->axq_acq);
1575 }
1576 }
1577
1578 if (ac == last_ac ||
1579 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1580 return;
Sujithe8324352009-01-16 21:38:42 +05301581 }
1582}
1583
Sujithe8324352009-01-16 21:38:42 +05301584/***********/
1585/* TX, DMA */
1586/***********/
1587
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001588/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589 * Insert a chain of ath_buf (descriptors) on a txq and
1590 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001591 */
Sujith102e0572008-10-29 10:15:16 +05301592static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001593 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594{
Sujithcbe61d82009-02-09 13:27:12 +05301595 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001596 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001597 struct ath_buf *bf, *bf_last;
1598 bool puttxbuf = false;
1599 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301600
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601 /*
1602 * Insert the frame on the outbound list and
1603 * pass it on to the hardware.
1604 */
1605
1606 if (list_empty(head))
1607 return;
1608
Felix Fietkaufce041b2011-05-19 12:20:25 +02001609 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001611 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001612
Joe Perches226afe62010-12-02 19:12:37 -08001613 ath_dbg(common, ATH_DBG_QUEUE,
1614 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001615
Felix Fietkaufce041b2011-05-19 12:20:25 +02001616 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1617 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001618 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001619 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001620 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001621 list_splice_tail_init(head, &txq->axq_q);
1622
Felix Fietkaufce041b2011-05-19 12:20:25 +02001623 if (txq->axq_link) {
1624 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001625 ath_dbg(common, ATH_DBG_XMIT,
1626 "link[%u] (%p)=%llx (%p)\n",
1627 txq->axq_qnum, txq->axq_link,
1628 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001629 } else if (!edma)
1630 puttxbuf = true;
1631
1632 txq->axq_link = bf_last->bf_desc;
1633 }
1634
1635 if (puttxbuf) {
1636 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1637 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1638 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1639 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1640 }
1641
1642 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001643 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001644 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001645 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001646
1647 if (!internal) {
1648 txq->axq_depth++;
1649 if (bf_is_ampdu_not_probing(bf))
1650 txq->axq_ampdu_depth++;
1651 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001652}
1653
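/*
 * Transmit an MPDU belonging to an aggregation-enabled TID. If the frame
 * cannot go out immediately (see the conditions below), it is buffered
 * in the TID's software queue; otherwise it is added to the BAW and
 * handed straight to the hardware.
 */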
Sujithe8324352009-01-16 21:38:42 +05301654static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001655 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301656{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001657 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001658 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001659 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301660
1661 /*
1662 * Do not queue to h/w when any of the following conditions is true:
1663 * - there are pending frames in software queue
1664 * - the TID is currently paused for ADDBA/BAR request
1665 * - seqno is not within block-ack window
1666 * - h/w queue depth exceeds low water mark
1667 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001668 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001669 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001670 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001671 /*
Sujithe8324352009-01-16 21:38:42 +05301672 * Add this frame to software queue for scheduling later
1673 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001674 */
Ben Greearbda8add2011-01-09 23:11:48 -08001675 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001676 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001677 if (!txctl->an || !txctl->an->sleeping)
1678 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301679 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001680 }
1681
Felix Fietkau44f1d262011-08-28 00:32:25 +02001682 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1683 if (!bf)
1684 return;
1685
Felix Fietkau399c6482011-09-14 21:24:17 +02001686 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001687 INIT_LIST_HEAD(&bf_head);
1688 list_add(&bf->list, &bf_head);
1689
Sujithe8324352009-01-16 21:38:42 +05301690 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001691 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301692
1693 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001694 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301695 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001696 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001697 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301698}
1699
Felix Fietkau82b873a2010-11-11 03:18:37 +01001700static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001701 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001702{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001703 struct ath_frame_info *fi = get_frame_info(skb);
1704 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301705 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001706
Felix Fietkau44f1d262011-08-28 00:32:25 +02001707 bf = fi->bf;
1708 if (!bf)
1709 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1710
1711 if (!bf)
1712 return;
1713
1714 INIT_LIST_HEAD(&bf_head);
1715 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001716 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301717
1718 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001719 if (tid)
1720 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301721
Sujithd43f30152009-01-16 21:38:53 +05301722 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001723 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001724 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301725 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001726}
1727
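/*
 * Record the hardware key index, key type and frame length for this skb
 * in its ath_frame_info.
 */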
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001728static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1729 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301730{
1731 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001732 struct ieee80211_sta *sta = tx_info->control.sta;
1733 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001734 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001735 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001736 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001737 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301738
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001739 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301740
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001741 if (sta)
1742 an = (struct ath_node *) sta->drv_priv;
1743
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001744 memset(fi, 0, sizeof(*fi));
1745 if (hw_key)
1746 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001747 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1748 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001749 else
1750 fi->keyix = ATH9K_TXKEYIX_INVALID;
1751 fi->keytype = keytype;
1752 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301753}
1754
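/*
 * On hardware with the APM capability, reduce a 3-chain mask to 2 chains
 * for 5 GHz transmissions at ratecodes below 0x90 (presumably legacy and
 * lower MCS rates).
 */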
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301755u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1756{
1757 struct ath_hw *ah = sc->sc_ah;
1758 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301759 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1760 (curchan->channelFlags & CHANNEL_5GHZ) &&
1761 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301762 return 0x3;
1763 else
1764 return chainmask;
1765}
1766
Felix Fietkau44f1d262011-08-28 00:32:25 +02001767/*
1768 * Assign a descriptor (and a sequence number if necessary),
1769 * and map the buffer for DMA. Frees the skb on error.
1770 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001771static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001772 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001773 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001774 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301775{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001776 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001777 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001778 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001779 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001780 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001781
1782 bf = ath_tx_get_buffer(sc);
1783 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001784 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001785 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001786 }
Sujithe8324352009-01-16 21:38:42 +05301787
Sujithe8324352009-01-16 21:38:42 +05301788 ATH_TXBUF_RESET(bf);
1789
Felix Fietkaufa05f872011-08-28 00:32:24 +02001790 if (tid) {
1791 seqno = tid->seq_next;
1792 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1793 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1794 bf->bf_state.seqno = seqno;
1795 }
1796
Sujithe8324352009-01-16 21:38:42 +05301797 bf->bf_mpdu = skb;
1798
Ben Greearc1739eb32010-10-14 12:45:29 -07001799 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1800 skb->len, DMA_TO_DEVICE);
1801 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301802 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001803 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001804 ath_err(ath9k_hw_common(sc->sc_ah),
1805 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001806 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001807 goto error;
Sujithe8324352009-01-16 21:38:42 +05301808 }
1809
Felix Fietkau56dc6332011-08-28 00:32:22 +02001810 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001811
1812 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001813
1814error:
1815 dev_kfree_skb_any(skb);
1816 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001817}
1818
1819/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001820static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001821 struct ath_tx_control *txctl)
1822{
Felix Fietkau04caf862010-11-14 15:20:12 +01001823 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1824 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001825 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001826 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001827 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301828
Sujithe8324352009-01-16 21:38:42 +05301829 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301830 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1831 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001832 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1833 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001834 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001835
Felix Fietkau066dae92010-11-07 14:59:39 +01001836 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001837 }
1838
1839 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001840 /*
1841 * Try aggregation if it's a unicast data frame
1842 * and the destination is HT capable.
1843 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001844 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301845 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001846 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1847 if (!bf)
1848 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001849
Felix Fietkau82b873a2010-11-11 03:18:37 +01001850 bf->bf_state.bfs_paprd = txctl->paprd;
1851
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301852 if (txctl->paprd)
1853 bf->bf_state.bfs_paprd_timestamp = jiffies;
1854
Felix Fietkau44f1d262011-08-28 00:32:25 +02001855 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301856 }
1857
Felix Fietkaufa05f872011-08-28 00:32:24 +02001858out:
Sujithe8324352009-01-16 21:38:42 +05301859 spin_unlock_bh(&txctl->txq->axq_lock);
1860}
1861
1862/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001863int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301864 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001865{
Felix Fietkau28d16702010-11-14 15:20:10 +01001866 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1867 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001868 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001869 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001870 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001871 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001872 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001873 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001874 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001875
Ben Greeara9927ba2010-12-06 21:13:49 -08001876 /* NOTE: sta can be NULL according to net/mac80211.h */
1877 if (sta)
1878 txctl->an = (struct ath_node *)sta->drv_priv;
1879
Felix Fietkau04caf862010-11-14 15:20:12 +01001880 if (info->control.hw_key)
1881 frmlen += info->control.hw_key->icv_len;
1882
Felix Fietkau28d16702010-11-14 15:20:10 +01001883 /*
1884 * As a temporary workaround, assign seq# here; this will likely need
1885 * to be cleaned up to work better with Beacon transmission and virtual
1886 * BSSes.
1887 */
1888 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1889 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1890 sc->tx.seq_no += 0x10;
1891 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1892 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1893 }
1894
Felix Fietkau4245d312011-09-14 21:24:27 +02001895 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
1896 /* Add the padding after the header if this is not already done */
1897 padpos = ath9k_cmn_padpos(hdr->frame_control);
1898 padsize = padpos & 3;
1899 if (padsize && skb->len > padpos) {
1900 if (skb_headroom(skb) < padsize)
1901 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001902
Felix Fietkau4245d312011-09-14 21:24:27 +02001903 skb_push(skb, padsize);
1904 memmove(skb->data, skb->data + padsize, padpos);
1905 }
Felix Fietkau28d16702010-11-14 15:20:10 +01001906 }
1907
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001908 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1909 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1910 !ieee80211_is_data(hdr->frame_control))
1911 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1912
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001913 setup_frame_info(hw, skb, frmlen);
1914
1915 /*
1916 * At this point, the vif, hw_key and sta pointers in the tx control
1917 * info are no longer valid (overwritten by the ath_frame_info data).
1918 */
1919
Felix Fietkau066dae92010-11-07 14:59:39 +01001920 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001921 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001922 if (txq == sc->tx.txq_map[q] &&
1923 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001924 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001925 txq->stopped = 1;
1926 }
1927 spin_unlock_bh(&txq->axq_lock);
1928
Felix Fietkau44f1d262011-08-28 00:32:25 +02001929 ath_tx_start_dma(sc, skb, txctl);
1930 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001931}
1932
Sujithe8324352009-01-16 21:38:42 +05301933/*****************/
1934/* TX Completion */
1935/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001936
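/*
 * Hand a completed frame back to mac80211: set the status flags, strip
 * any MAC header padding that was added on transmit, and wake the
 * corresponding mac80211 queue if it had been stopped.
 */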
Sujithe8324352009-01-16 21:38:42 +05301937static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301938 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001939{
Sujithe8324352009-01-16 21:38:42 +05301940 struct ieee80211_hw *hw = sc->hw;
1941 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001942 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001943 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001944 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301945
Joe Perches226afe62010-12-02 19:12:37 -08001946 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301947
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301948 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301949 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301950
Felix Fietkau55797b12011-09-14 21:24:16 +02001951 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301952 /* Frame was ACKed */
1953 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301954
Felix Fietkau4245d312011-09-14 21:24:27 +02001955 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
1956 padpos = ath9k_cmn_padpos(hdr->frame_control);
1957 padsize = padpos & 3;
1958 if (padsize && skb->len>padpos+padsize) {
1959 /*
1960 * Remove MAC header padding before giving the frame back to
1961 * mac80211.
1962 */
1963 memmove(skb->data + padsize, skb->data, padpos);
1964 skb_pull(skb, padsize);
1965 }
Sujithe8324352009-01-16 21:38:42 +05301966 }
1967
Sujith1b04b932010-01-08 10:36:05 +05301968 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1969 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001970 ath_dbg(common, ATH_DBG_PS,
1971 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301972 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1973 PS_WAIT_FOR_CAB |
1974 PS_WAIT_FOR_PSPOLL_DATA |
1975 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001976 }
1977
Felix Fietkau7545daf2011-01-24 19:23:16 +01001978 q = skb_get_queue_mapping(skb);
1979 if (txq == sc->tx.txq_map[q]) {
1980 spin_lock_bh(&txq->axq_lock);
1981 if (WARN_ON(--txq->pending_frames < 0))
1982 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001983
Felix Fietkau7545daf2011-01-24 19:23:16 +01001984 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1985 ieee80211_wake_queue(sc->hw, q);
1986 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001987 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001988 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001989 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001990
1991 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301992}
1993
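/*
 * Unmap the skb, hand it back to mac80211 (or to the PAPRD calibration
 * logic for its test frames) and return the ath_buf chain to the free
 * list.
 */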
1994static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001995 struct ath_txq *txq, struct list_head *bf_q,
1996 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301997{
1998 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001999 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302000 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302001 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302002
Sujithe8324352009-01-16 21:38:42 +05302003 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302004 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302005
Felix Fietkau55797b12011-09-14 21:24:16 +02002006 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302007 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302008
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002009 if (ts->ts_status & ATH9K_TXERR_FILT)
2010 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2011
Ben Greearc1739eb32010-10-14 12:45:29 -07002012 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002013 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002014
2015 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302016 if (time_after(jiffies,
2017 bf->bf_state.bfs_paprd_timestamp +
2018 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002019 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002020 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002021 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002022 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002023 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302024 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002025 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002026 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2027 * accidentally reference it later.
2028 */
2029 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302030
2031 /*
2032 * Return the list of ath_bufs of this mpdu to the free queue
2033 */
2034 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2035 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2036 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2037}
2038
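/*
 * Convert the hardware tx status into mac80211 rate-control feedback:
 * ack RSSI, A-MPDU subframe counts and per-rate retry counts.
 */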
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002039static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2040 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002041 int txok)
Sujithc4288392008-11-18 09:09:30 +05302042{
Sujitha22be222009-03-30 15:28:36 +05302043 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302044 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302045 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002046 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002047 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302048 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302049
Sujith95e4acb2009-03-13 08:56:09 +05302050 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002051 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302052
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002053 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302054 WARN_ON(tx_rateindex >= hw->max_rates);
2055
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002056 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002057 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302058
Felix Fietkaub572d032010-11-14 15:20:07 +01002059 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002060
Felix Fietkaub572d032010-11-14 15:20:07 +01002061 tx_info->status.ampdu_len = nframes;
2062 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002063 }
2064
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002065 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002066 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002067 /*
2068 * If an underrun error is seen, treat it as an excessive
2069 * retry only if the max frame trigger level has been reached
2070 * (2 KB for single stream, and 4 KB for dual stream).
2071 * Adjust the long retry as if the frame was tried
2072 * hw->max_rate_tries times to affect how rate control updates
2073 * PER for the failed rate.
2074 * In case of congestion on the bus, penalizing this type of
2075 * underrun should help the hardware actually transmit new frames
2076 * successfully by eventually preferring slower rates.
2077 * This itself should also alleviate congestion on the bus.
2078 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002079 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2080 ATH9K_TX_DELIM_UNDERRUN)) &&
2081 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002082 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002083 tx_info->status.rates[tx_rateindex].count =
2084 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302085 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302086
Felix Fietkau545750d2009-11-23 22:21:01 +01002087 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302088 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002089 tx_info->status.rates[i].idx = -1;
2090 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302091
Felix Fietkau78c46532010-06-25 01:26:16 +02002092 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302093}
2094
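/*
 * Completion path shared by the legacy and EDMA handlers: update the
 * queue depth counters and dispatch to the aggregate or single-frame
 * completion code.
 */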
Felix Fietkaufce041b2011-05-19 12:20:25 +02002095static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2096 struct ath_tx_status *ts, struct ath_buf *bf,
2097 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302098 __releases(txq->axq_lock)
2099 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002100{
2101 int txok;
2102
2103 txq->axq_depth--;
2104 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2105 txq->axq_tx_inprogress = false;
2106 if (bf_is_ampdu_not_probing(bf))
2107 txq->axq_ampdu_depth--;
2108
2109 spin_unlock_bh(&txq->axq_lock);
2110
2111 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002112 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002113 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2114 } else
2115 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2116
2117 spin_lock_bh(&txq->axq_lock);
2118
2119 if (sc->sc_flags & SC_OP_TXAGGR)
2120 ath_txq_schedule(sc, txq);
2121}
2122
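/*
 * Reap completed frames from a legacy (non-EDMA) tx queue. The last DONE
 * descriptor is kept as the holding descriptor to avoid the TxE race
 * described below.
 */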
Sujithc4288392008-11-18 09:09:30 +05302123static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124{
Sujithcbe61d82009-02-09 13:27:12 +05302125 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002126 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2128 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302129 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002130 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 int status;
2132
Joe Perches226afe62010-12-02 19:12:37 -08002133 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2134 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2135 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136
Felix Fietkaufce041b2011-05-19 12:20:25 +02002137 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002139 if (work_pending(&sc->hw_reset_work))
2140 break;
2141
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142 if (list_empty(&txq->axq_q)) {
2143 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002144 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002145 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002146 break;
2147 }
2148 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2149
2150 /*
2151 * There is a race condition where a BH gets scheduled
2152 * after sw writes TxE and before hw re-loads the last
2153 * descriptor to get the newly chained one.
2154 * Software must keep the last DONE descriptor as a
2155 * holding descriptor - software does so by marking
2156 * it with the STALE flag.
2157 */
2158 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302159 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002160 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002161 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002163
2164 bf = list_entry(bf_held->list.next, struct ath_buf,
2165 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 }
2167
2168 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302169 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170
Felix Fietkau29bffa92010-03-29 20:14:23 -07002171 memset(&ts, 0, sizeof(ts));
2172 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002173 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002175
Ben Greear2dac4fb2011-01-09 23:11:45 -08002176 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177
2178 /*
2179 * Remove ath_bufs of the same transmit unit from the txq,
2180 * but leave the last descriptor back as the holding
2181 * descriptor for hw.
2182 */
Sujitha119cc42009-03-30 15:28:38 +05302183 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185 if (!list_is_singular(&lastbf->list))
2186 list_cut_position(&bf_head,
2187 &txq->axq_q, lastbf->list.prev);
2188
Felix Fietkaufce041b2011-05-19 12:20:25 +02002189 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002190 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002191 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 }
Johannes Berge6a98542008-10-21 12:40:02 +02002193
Felix Fietkaufce041b2011-05-19 12:20:25 +02002194 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002196 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002197}
2198
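/*
 * Tx hang watchdog: if a queue still has frames outstanding and none
 * completed since the previous poll, assume the hardware is stuck and
 * schedule a reset.
 */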
Sujith305fe472009-07-23 15:32:29 +05302199static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002200{
2201 struct ath_softc *sc = container_of(work, struct ath_softc,
2202 tx_complete_work.work);
2203 struct ath_txq *txq;
2204 int i;
2205 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002206#ifdef CONFIG_ATH9K_DEBUGFS
2207 sc->tx_complete_poll_work_seen++;
2208#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002209
2210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2211 if (ATH_TXQ_SETUP(sc, i)) {
2212 txq = &sc->tx.txq[i];
2213 spin_lock_bh(&txq->axq_lock);
2214 if (txq->axq_depth) {
2215 if (txq->axq_tx_inprogress) {
2216 needreset = true;
2217 spin_unlock_bh(&txq->axq_lock);
2218 break;
2219 } else {
2220 txq->axq_tx_inprogress = true;
2221 }
2222 }
2223 spin_unlock_bh(&txq->axq_lock);
2224 }
2225
2226 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002227 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2228 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002229 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002230 }
2231
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002232 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002233 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2234}
2235
2236
Sujithe8324352009-01-16 21:38:42 +05302237
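/*
 * Process every tx queue whose completion bit is set in the interrupt
 * status mask.
 */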
2238void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002239{
Sujithe8324352009-01-16 21:38:42 +05302240 int i;
2241 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002242
Sujithe8324352009-01-16 21:38:42 +05302243 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002244
2245 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302246 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2247 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002248 }
2249}
2250
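/*
 * Completion handler for EDMA-capable hardware, where tx status is
 * reported through a dedicated status ring rather than in the frame
 * descriptors themselves.
 */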
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251void ath_tx_edma_tasklet(struct ath_softc *sc)
2252{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002253 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2255 struct ath_hw *ah = sc->sc_ah;
2256 struct ath_txq *txq;
2257 struct ath_buf *bf, *lastbf;
2258 struct list_head bf_head;
2259 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002260
2261 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002262 if (work_pending(&sc->hw_reset_work))
2263 break;
2264
Felix Fietkaufce041b2011-05-19 12:20:25 +02002265 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002266 if (status == -EINPROGRESS)
2267 break;
2268 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002269 ath_dbg(common, ATH_DBG_XMIT,
2270 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271 break;
2272 }
2273
2274 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002275 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002276 continue;
2277
Felix Fietkaufce041b2011-05-19 12:20:25 +02002278 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
2280 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002281
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2283 spin_unlock_bh(&txq->axq_lock);
2284 return;
2285 }
2286
2287 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2288 struct ath_buf, list);
2289 lastbf = bf->bf_lastbf;
2290
2291 INIT_LIST_HEAD(&bf_head);
2292 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2293 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294
Felix Fietkaufce041b2011-05-19 12:20:25 +02002295 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2296 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002297
Felix Fietkaufce041b2011-05-19 12:20:25 +02002298 if (!list_empty(&txq->axq_q)) {
2299 struct list_head bf_q;
2300
2301 INIT_LIST_HEAD(&bf_q);
2302 txq->axq_link = NULL;
2303 list_splice_tail_init(&txq->axq_q, &bf_q);
2304 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2305 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002306 }
2307
Felix Fietkaufce041b2011-05-19 12:20:25 +02002308 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002309 spin_unlock_bh(&txq->axq_lock);
2310 }
2311}
2312
Sujithe8324352009-01-16 21:38:42 +05302313/*****************/
2314/* Init, Cleanup */
2315/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
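/* Allocate DMA-coherent memory for the EDMA tx status ring. */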
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002317static int ath_txstatus_setup(struct ath_softc *sc, int size)
2318{
2319 struct ath_descdma *dd = &sc->txsdma;
2320 u8 txs_len = sc->sc_ah->caps.txs_len;
2321
2322 dd->dd_desc_len = size * txs_len;
2323 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2324 &dd->dd_desc_paddr, GFP_KERNEL);
2325 if (!dd->dd_desc)
2326 return -ENOMEM;
2327
2328 return 0;
2329}
2330
2331static int ath_tx_edma_init(struct ath_softc *sc)
2332{
2333 int err;
2334
2335 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2336 if (!err)
2337 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2338 sc->txsdma.dd_desc_paddr,
2339 ATH_TXSTATUS_RING_SIZE);
2340
2341 return err;
2342}
2343
2344static void ath_tx_edma_cleanup(struct ath_softc *sc)
2345{
2346 struct ath_descdma *dd = &sc->txsdma;
2347
2348 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2349 dd->dd_desc_paddr);
2350}
2351
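/*
 * Allocate the tx and beacon descriptor buffers and, on EDMA-capable
 * hardware, the tx status ring.
 */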
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352int ath_tx_init(struct ath_softc *sc, int nbufs)
2353{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002354 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355 int error = 0;
2356
Sujith797fe5cb2009-03-30 15:28:45 +05302357 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002358
Sujith797fe5cb2009-03-30 15:28:45 +05302359 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002360 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302361 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002362 ath_err(common,
2363 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302364 goto err;
2365 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujith797fe5cb2009-03-30 15:28:45 +05302367 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002368 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302369 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002370 ath_err(common,
2371 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302372 goto err;
2373 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002375 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2376
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002377 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2378 error = ath_tx_edma_init(sc);
2379 if (error)
2380 goto err;
2381 }
2382
Sujith797fe5cb2009-03-30 15:28:45 +05302383err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384 if (error != 0)
2385 ath_tx_cleanup(sc);
2386
2387 return error;
2388}
2389
Sujith797fe5cb2009-03-30 15:28:45 +05302390void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391{
Sujithb77f4832008-12-07 21:44:03 +05302392 if (sc->beacon.bdma.dd_desc_len != 0)
2393 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394
Sujithb77f4832008-12-07 21:44:03 +05302395 if (sc->tx.txdma.dd_desc_len != 0)
2396 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002397
2398 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2399 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002400}
2401
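/*
 * Initialize the per-station TID and AC state used by the aggregation
 * scheduler.
 */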
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002402void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2403{
Sujithc5170162008-10-29 10:13:59 +05302404 struct ath_atx_tid *tid;
2405 struct ath_atx_ac *ac;
2406 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407
Sujith8ee5afb2008-12-07 21:43:36 +05302408 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302409 tidno < WME_NUM_TID;
2410 tidno++, tid++) {
2411 tid->an = an;
2412 tid->tidno = tidno;
2413 tid->seq_start = tid->seq_next = 0;
2414 tid->baw_size = WME_MAX_BA;
2415 tid->baw_head = tid->baw_tail = 0;
2416 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302417 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302418 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002419 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302420 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302421 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302422 tid->state &= ~AGGR_ADDBA_COMPLETE;
2423 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302424 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002425
Sujith8ee5afb2008-12-07 21:43:36 +05302426 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302427 acno < WME_NUM_AC; acno++, ac++) {
2428 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002429 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302430 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431 }
2432}
2433
Sujithb5aa9bf2008-10-29 10:13:31 +05302434void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435{
Felix Fietkau2b409942010-07-07 19:42:08 +02002436 struct ath_atx_ac *ac;
2437 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002439 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302440
Felix Fietkau2b409942010-07-07 19:42:08 +02002441 for (tidno = 0, tid = &an->tid[tidno];
2442 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002443
Felix Fietkau2b409942010-07-07 19:42:08 +02002444 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002445 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446
Felix Fietkau2b409942010-07-07 19:42:08 +02002447 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002448
Felix Fietkau2b409942010-07-07 19:42:08 +02002449 if (tid->sched) {
2450 list_del(&tid->list);
2451 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002452 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002453
2454 if (ac->sched) {
2455 list_del(&ac->list);
2456 tid->ac->sched = false;
2457 }
2458
2459 ath_tid_drain(sc, txq, tid);
2460 tid->state &= ~AGGR_ADDBA_COMPLETE;
2461 tid->state &= ~AGGR_CLEANUP;
2462
2463 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002464 }
2465}