/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE               8
#define OFDM_PLCP_BITS              22
#define HT_RC_2_STREAMS(_rc)        ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                       8
#define L_LTF                       8
#define L_SIG                       4
#define HT_SIG                      8
#define HT_STF                      4
#define HT_LTF(_ns)                 (4 * (_ns))
#define SYMBOL_TIME(_ns)            ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)     (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

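/*
 * Quick sanity check on the macros above (illustrative note, not part of
 * the original source): with the regular 0.8 us guard interval an HT OFDM
 * symbol lasts 4 us, so SYMBOL_TIME(10) = 40 us; with the short guard
 * interval a symbol lasts 3.6 us and SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us. The NUM_SYMBOLS_PER_USEC* macros are the
 * corresponding integer inverse mappings used for the mpdu density math.
 */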

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
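
/*
 * Rough cross-check of the table above (illustrative note, not part of the
 * original source): at MCS 0 / HT20 each 4 us symbol carries 26 data bits
 * (see bits_per_symbol), so a 4 ms burst carries about 26 * 1000 / 8 = 3250
 * bytes; the table entry of 3212 is slightly lower, presumably to leave
 * headroom for preamble/PLCP overhead. Entries are capped at 65532, just
 * below the 16-bit length limit the hardware accepts.
 */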

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
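
/*
 * Illustrative example of the block-ack window bookkeeping above (added
 * note, not in the original source): assuming ATH_BA_INDEX() computes the
 * offset of a sequence number from the window start, a TID with
 * seq_start = 100 that queues seqno 103 sets the bit at
 * (baw_head + 3) % ATH_TID_MAX_BUFS. When the frame completes,
 * ath_tx_update_baw() clears that bit and slides seq_start/baw_head forward
 * over any leading completed slots.
 */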

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0,
								    !flush);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
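
/*
 * Worked example for the limit selection above (illustrative note, not in
 * the original source): with a single MCS 7 / HT20 rate series,
 * ath_max_4ms_framelen gives 32172 bytes. Without BT coexistence pressure
 * the aggregate is capped at min(32172, ATH_AMPDU_LIMIT_MAX); when BT
 * priority traffic is detected the cap shrinks to roughly 3/8 of that, and
 * with MCI coexistence the shared mci->aggr_limit field scales it in
 * sixteenths.
 */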

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on a non-enterprise AR9003 card.
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
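
/*
 * Illustrative example of the density calculation above (added note, not in
 * the original source): for an mpdudensity of 8 us at MCS 7, HT20, full GI,
 * NUM_SYMBOLS_PER_USEC(8) = 2 symbols and nsymbits = 260 bits/symbol, so
 * minlen = 2 * 260 / 8 = 65 bytes; a 40-byte subframe would then need about
 * (65 - 40) / ATH_AGGR_DELIM_SZ extra delimiters on top of the standard
 * count from ATH_AGGR_GET_NDELIM().
 */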

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
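
/*
 * Worked example for the duration math above (illustrative note, not in the
 * original source): a 1500-byte MPDU at MCS 7, HT20, full GI uses a single
 * stream, so nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, giving
 * nsymbols = DIV_ROUND_UP(12022, 260) = 47 and SYMBOL_TIME(47) = 188 us;
 * adding the 36 us of legacy/HT training and signal fields yields 224 us.
 */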
869
Felix Fietkau493cf042011-09-14 21:24:22 +0200870static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
871 struct ath_tx_info *info, int len)
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200872{
873 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200874 struct sk_buff *skb;
875 struct ieee80211_tx_info *tx_info;
876 struct ieee80211_tx_rate *rates;
877 const struct ieee80211_rate *rate;
878 struct ieee80211_hdr *hdr;
Felix Fietkau493cf042011-09-14 21:24:22 +0200879 int i;
880 u8 rix = 0;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200881
882 skb = bf->bf_mpdu;
883 tx_info = IEEE80211_SKB_CB(skb);
884 rates = tx_info->control.rates;
885 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau493cf042011-09-14 21:24:22 +0200886
887 /* set dur_update_en for l-sig computation except for PS-Poll frames */
888 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200889
890 /*
891 * We check if Short Preamble is needed for the CTS rate by
892 * checking the BSS's global flag.
893 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
894 */
895 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
Felix Fietkau493cf042011-09-14 21:24:22 +0200896 info->rtscts_rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200897 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau493cf042011-09-14 21:24:22 +0200898 info->rtscts_rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200899
900 for (i = 0; i < 4; i++) {
901 bool is_40, is_sgi, is_sp;
902 int phy;
903
904 if (!rates[i].count || (rates[i].idx < 0))
905 continue;
906
907 rix = rates[i].idx;
Felix Fietkau493cf042011-09-14 21:24:22 +0200908 info->rates[i].Tries = rates[i].count;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200909
910 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200911 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
912 info->flags |= ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200913 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200914 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
915 info->flags |= ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200916 }
917
918 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau493cf042011-09-14 21:24:22 +0200919 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200920 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
Felix Fietkau493cf042011-09-14 21:24:22 +0200921 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200922
923 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
924 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
925 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
926
927 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
928 /* MCS rates */
Felix Fietkau493cf042011-09-14 21:24:22 +0200929 info->rates[i].Rate = rix | 0x80;
930 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
931 ah->txchainmask, info->rates[i].Rate);
932 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200933 is_40, is_sgi, is_sp);
934 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
Felix Fietkau493cf042011-09-14 21:24:22 +0200935 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200936 continue;
937 }
938
939 /* legacy rates */
940 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
941 !(rate->flags & IEEE80211_RATE_ERP_G))
942 phy = WLAN_RC_PHY_CCK;
943 else
944 phy = WLAN_RC_PHY_OFDM;
945
946 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
Felix Fietkau493cf042011-09-14 21:24:22 +0200947 info->rates[i].Rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200948 if (rate->hw_value_short) {
949 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
Felix Fietkau493cf042011-09-14 21:24:22 +0200950 info->rates[i].Rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200951 } else {
952 is_sp = false;
953 }
954
955 if (bf->bf_state.bfs_paprd)
Felix Fietkau493cf042011-09-14 21:24:22 +0200956 info->rates[i].ChSel = ah->txchainmask;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200957 else
Felix Fietkau493cf042011-09-14 21:24:22 +0200958 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
959 ah->txchainmask, info->rates[i].Rate);
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200960
Felix Fietkau493cf042011-09-14 21:24:22 +0200961 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200962 phy, rate->bitrate * 100, len, rix, is_sp);
963 }
964
965 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
966 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau493cf042011-09-14 21:24:22 +0200967 info->flags &= ~ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200968
969 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
Felix Fietkau493cf042011-09-14 21:24:22 +0200970 if (info->flags & ATH9K_TXDESC_RTSENA)
971 info->flags &= ~ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200972}
973
Felix Fietkau493cf042011-09-14 21:24:22 +0200974static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
975{
976 struct ieee80211_hdr *hdr;
977 enum ath9k_pkt_type htype;
978 __le16 fc;
979
980 hdr = (struct ieee80211_hdr *)skb->data;
981 fc = hdr->frame_control;
982
983 if (ieee80211_is_beacon(fc))
984 htype = ATH9K_PKT_TYPE_BEACON;
985 else if (ieee80211_is_probe_resp(fc))
986 htype = ATH9K_PKT_TYPE_PROBE_RESP;
987 else if (ieee80211_is_atim(fc))
988 htype = ATH9K_PKT_TYPE_ATIM;
989 else if (ieee80211_is_pspoll(fc))
990 htype = ATH9K_PKT_TYPE_PSPOLL;
991 else
992 htype = ATH9K_PKT_TYPE_NORMAL;
993
994 return htype;
995}
996
997static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
998 struct ath_txq *txq, int len)
Felix Fietkau399c6482011-09-14 21:24:17 +0200999{
1000 struct ath_hw *ah = sc->sc_ah;
1001 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1002 struct ath_buf *bf_first = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001003 struct ath_tx_info info;
Felix Fietkau399c6482011-09-14 21:24:17 +02001004 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
Felix Fietkau399c6482011-09-14 21:24:17 +02001005
Felix Fietkau493cf042011-09-14 21:24:22 +02001006 memset(&info, 0, sizeof(info));
1007 info.is_first = true;
1008 info.is_last = true;
1009 info.txpower = MAX_RATE_POWER;
1010 info.qcu = txq->axq_qnum;
Felix Fietkau399c6482011-09-14 21:24:17 +02001011
Felix Fietkau493cf042011-09-14 21:24:22 +02001012 info.flags = ATH9K_TXDESC_INTREQ;
1013 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1014 info.flags |= ATH9K_TXDESC_NOACK;
1015 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1016 info.flags |= ATH9K_TXDESC_LDPC;
1017
1018 ath_buf_set_rate(sc, bf, &info, len);
1019
1020 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1021 info.flags |= ATH9K_TXDESC_CLRDMASK;
1022
1023 if (bf->bf_state.bfs_paprd)
1024 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
1025
Felix Fietkau399c6482011-09-14 21:24:17 +02001026
1027 while (bf) {
Felix Fietkau493cf042011-09-14 21:24:22 +02001028 struct sk_buff *skb = bf->bf_mpdu;
1029 struct ath_frame_info *fi = get_frame_info(skb);
1030
1031 info.type = get_hw_packet_type(skb);
Felix Fietkau399c6482011-09-14 21:24:17 +02001032 if (bf->bf_next)
Felix Fietkau493cf042011-09-14 21:24:22 +02001033 info.link = bf->bf_next->bf_daddr;
Felix Fietkau399c6482011-09-14 21:24:17 +02001034 else
Felix Fietkau493cf042011-09-14 21:24:22 +02001035 info.link = 0;
Felix Fietkau399c6482011-09-14 21:24:17 +02001036
John W. Linville42cecc32011-09-19 15:42:31 -04001037 info.buf_addr[0] = bf->bf_buf_addr;
1038 info.buf_len[0] = skb->len;
Felix Fietkau493cf042011-09-14 21:24:22 +02001039 info.pkt_len = fi->framelen;
1040 info.keyix = fi->keyix;
1041 info.keytype = fi->keytype;
1042
1043 if (aggr) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001044 if (bf == bf_first)
Felix Fietkau493cf042011-09-14 21:24:22 +02001045 info.aggr = AGGR_BUF_FIRST;
1046 else if (!bf->bf_next)
1047 info.aggr = AGGR_BUF_LAST;
1048 else
1049 info.aggr = AGGR_BUF_MIDDLE;
Felix Fietkau399c6482011-09-14 21:24:17 +02001050
Felix Fietkau493cf042011-09-14 21:24:22 +02001051 info.ndelim = bf->bf_state.ndelim;
1052 info.aggr_len = len;
Felix Fietkau399c6482011-09-14 21:24:17 +02001053 }
1054
Felix Fietkau493cf042011-09-14 21:24:22 +02001055 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
Felix Fietkau399c6482011-09-14 21:24:17 +02001056 bf = bf->bf_next;
1057 }
1058}
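
/*
 * Clarifying note (added, not in the original source): each subframe of an
 * A-MPDU gets its own descriptor here; info.link carries the DMA address of
 * the next subframe's descriptor, while info.aggr marks the position in the
 * chain (first/middle/last) and info.ndelim/info.aggr_len carry the padding
 * and total aggregate length computed in ath_tx_form_aggr().
 */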
1059
Sujithe8324352009-01-16 21:38:42 +05301060static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1061 struct ath_atx_tid *tid)
1062{
Sujithd43f30152009-01-16 21:38:53 +05301063 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301064 enum ATH_AGGR_STATUS status;
Felix Fietkau399c6482011-09-14 21:24:17 +02001065 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +05301066 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001067 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +05301068
1069 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +02001070 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +05301071 return;
1072
1073 INIT_LIST_HEAD(&bf_q);
1074
Felix Fietkau269c44b2010-11-14 15:20:06 +01001075 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +05301076
1077 /*
Sujithd43f30152009-01-16 21:38:53 +05301078 * no frames picked up to be aggregated;
1079 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +05301080 */
1081 if (list_empty(&bf_q))
1082 break;
1083
1084 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +05301085 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Felix Fietkau399c6482011-09-14 21:24:17 +02001086 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +05301087
Felix Fietkau55195412011-04-17 23:28:09 +02001088 if (tid->ac->clear_ps_filter) {
1089 tid->ac->clear_ps_filter = false;
Felix Fietkau399c6482011-09-14 21:24:17 +02001090 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1091 } else {
1092 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
Felix Fietkau55195412011-04-17 23:28:09 +02001093 }
1094
Sujithd43f30152009-01-16 21:38:53 +05301095 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +01001096 if (bf == bf->bf_lastbf) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001097 aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1098 bf->bf_state.bf_type = BUF_AMPDU;
1099 } else {
1100 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +05301101 }
1102
Felix Fietkau493cf042011-09-14 21:24:22 +02001103 ath_tx_fill_desc(sc, bf, txq, aggr_len);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001104 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001105 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +05301106 status != ATH_AGGR_BAW_CLOSED);
1107}
1108
Felix Fietkau231c3a12010-09-20 19:35:28 +02001109int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1110 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301111{
1112 struct ath_atx_tid *txtid;
1113 struct ath_node *an;
1114
1115 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301116 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001117
1118 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1119 return -EAGAIN;
1120
Sujithf83da962009-07-23 15:32:37 +05301121 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001122 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001123 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001124
Felix Fietkau2ed72222011-01-10 17:05:49 -07001125 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1126 txtid->baw_head = txtid->baw_tail = 0;
1127
Felix Fietkau231c3a12010-09-20 19:35:28 +02001128 return 0;
Sujithe8324352009-01-16 21:38:42 +05301129}
1130
Sujithf83da962009-07-23 15:32:37 +05301131void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +05301132{
1133 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1134 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +01001135 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +05301136
1137 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +05301138 return;
Sujithe8324352009-01-16 21:38:42 +05301139
1140 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +05301141 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +05301142 return;
Sujithe8324352009-01-16 21:38:42 +05301143 }
1144
Sujithe8324352009-01-16 21:38:42 +05301145 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001146 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +02001147
1148 /*
1149 * If frames are still being transmitted for this TID, they will be
1150 * cleaned up during tx completion. To prevent race conditions, this
1151 * TID can only be reused after all in-progress subframes have been
1152 * completed.
1153 */
1154 if (txtid->baw_head != txtid->baw_tail)
1155 txtid->state |= AGGR_CLEANUP;
1156 else
1157 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +05301158 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301159
Felix Fietkau90fa5392010-09-20 13:45:38 +02001160 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301161}
1162
Johannes Berg042ec452011-09-29 16:04:26 +02001163void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1164 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001165{
1166 struct ath_atx_tid *tid;
1167 struct ath_atx_ac *ac;
1168 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001169 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001170 int tidno;
1171
1172 for (tidno = 0, tid = &an->tid[tidno];
1173 tidno < WME_NUM_TID; tidno++, tid++) {
1174
1175 if (!tid->sched)
1176 continue;
1177
1178 ac = tid->ac;
1179 txq = ac->txq;
1180
1181 spin_lock_bh(&txq->axq_lock);
1182
Johannes Berg042ec452011-09-29 16:04:26 +02001183 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001184
1185 tid->sched = false;
1186 list_del(&tid->list);
1187
1188 if (ac->sched) {
1189 ac->sched = false;
1190 list_del(&ac->list);
1191 }
1192
1193 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001194
Johannes Berg042ec452011-09-29 16:04:26 +02001195 ieee80211_sta_set_buffered(sta, tidno, buffered);
1196 }
Felix Fietkau55195412011-04-17 23:28:09 +02001197}
1198
1199void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1200{
1201 struct ath_atx_tid *tid;
1202 struct ath_atx_ac *ac;
1203 struct ath_txq *txq;
1204 int tidno;
1205
1206 for (tidno = 0, tid = &an->tid[tidno];
1207 tidno < WME_NUM_TID; tidno++, tid++) {
1208
1209 ac = tid->ac;
1210 txq = ac->txq;
1211
1212 spin_lock_bh(&txq->axq_lock);
1213 ac->clear_ps_filter = true;
1214
Felix Fietkau56dc6332011-08-28 00:32:22 +02001215 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001216 ath_tx_queue_tid(txq, tid);
1217 ath_txq_schedule(sc, txq);
1218 }
1219
1220 spin_unlock_bh(&txq->axq_lock);
1221 }
1222}
1223
Sujithe8324352009-01-16 21:38:42 +05301224void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1225{
1226 struct ath_atx_tid *txtid;
1227 struct ath_node *an;
1228
1229 an = (struct ath_node *)sta->drv_priv;
1230
1231 if (sc->sc_flags & SC_OP_TXAGGR) {
1232 txtid = ATH_AN_2_TID(an, tid);
1233 txtid->baw_size =
1234 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1235 txtid->state |= AGGR_ADDBA_COMPLETE;
1236 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1237 ath_tx_resume_tid(sc, txtid);
1238 }
1239}
1240
Sujithe8324352009-01-16 21:38:42 +05301241/********************/
1242/* Queue Management */
1243/********************/
1244
Sujithe8324352009-01-16 21:38:42 +05301245static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1246 struct ath_txq *txq)
1247{
1248 struct ath_atx_ac *ac, *ac_tmp;
1249 struct ath_atx_tid *tid, *tid_tmp;
1250
1251 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1252 list_del(&ac->list);
1253 ac->sched = false;
1254 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1255 list_del(&tid->list);
1256 tid->sched = false;
1257 ath_tid_drain(sc, txq, tid);
1258 }
1259 }
1260}
1261
1262struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1263{
Sujithcbe61d82009-02-09 13:27:12 +05301264 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301265 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001266 static const int subtype_txq_to_hwq[] = {
1267 [WME_AC_BE] = ATH_TXQ_AC_BE,
1268 [WME_AC_BK] = ATH_TXQ_AC_BK,
1269 [WME_AC_VI] = ATH_TXQ_AC_VI,
1270 [WME_AC_VO] = ATH_TXQ_AC_VO,
1271 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001272 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301273
1274 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001275 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301276 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1277 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1278 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1279 qi.tqi_physCompBuf = 0;
1280
1281 /*
1282 * Enable interrupts only for EOL and DESC conditions.
1283 * We mark tx descriptors to receive a DESC interrupt
1284 * when a tx queue gets deep; otherwise we wait for the
1285 * EOL interrupt to reap descriptors. Note that this is done to
1286 * reduce interrupt load and this only defers reaping
1287 * descriptors, never transmitting frames. Aside from
1288 * reducing interrupts this also permits more concurrency.
1289 * The only potential downside is if the tx queue backs
1290 * up, in which case the top half of the kernel may back up
1291 * due to a lack of tx descriptors.
1292 *
1293 * The UAPSD queue is an exception, since we take a desc-
1294 * based intr on the EOSP frames.
1295 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001296 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1297 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1298 TXQ_FLAG_TXERRINT_ENABLE;
1299 } else {
1300 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1301 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1302 else
1303 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1304 TXQ_FLAG_TXDESCINT_ENABLE;
1305 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001306 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1307 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301308 /*
1309 * NB: don't print a message, this happens
1310 * normally on parts with too few tx queues
1311 */
1312 return NULL;
1313 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001314 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1315 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301316
Ben Greear60f2d1d2011-01-09 23:11:52 -08001317 txq->axq_qnum = axq_qnum;
1318 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301319 txq->axq_link = NULL;
1320 INIT_LIST_HEAD(&txq->axq_q);
1321 INIT_LIST_HEAD(&txq->axq_acq);
1322 spin_lock_init(&txq->axq_lock);
1323 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001324 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001325 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001326 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001327
1328 txq->txq_headidx = txq->txq_tailidx = 0;
1329 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1330 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301331 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001332 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301333}
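
/*
 * Illustrative sketch (editor addition, not code from this file): one way a
 * caller could map the four WME access categories onto hardware data queues
 * with ath_txq_setup(). ATH9K_TX_QUEUE_DATA and sc->tx.txq_map are assumed to
 * match the rest of the driver; error handling is simplified.
 */
static int example_setup_data_queues(struct ath_softc *sc)
{
	int i;

	for (i = 0; i < WME_NUM_AC; i++) {
		/* subtype selects the access category (BE/BK/VI/VO) */
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		if (!sc->tx.txq_map[i])
			return -EIO;	/* hardware has too few tx queues */
	}

	return 0;
}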
1334
Sujithe8324352009-01-16 21:38:42 +05301335int ath_txq_update(struct ath_softc *sc, int qnum,
1336 struct ath9k_tx_queue_info *qinfo)
1337{
Sujithcbe61d82009-02-09 13:27:12 +05301338 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301339 int error = 0;
1340 struct ath9k_tx_queue_info qi;
1341
1342 if (qnum == sc->beacon.beaconq) {
1343 /*
1344 * XXX: for beacon queue, we just save the parameter.
1345 * It will be picked up by ath_beaconq_config when
1346 * it's necessary.
1347 */
1348 sc->beacon.beacon_qi = *qinfo;
1349 return 0;
1350 }
1351
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001352 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301353
1354 ath9k_hw_get_txq_props(ah, qnum, &qi);
1355 qi.tqi_aifs = qinfo->tqi_aifs;
1356 qi.tqi_cwmin = qinfo->tqi_cwmin;
1357 qi.tqi_cwmax = qinfo->tqi_cwmax;
1358 qi.tqi_burstTime = qinfo->tqi_burstTime;
1359 qi.tqi_readyTime = qinfo->tqi_readyTime;
1360
1361 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001362 ath_err(ath9k_hw_common(sc->sc_ah),
1363 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301364 error = -EIO;
1365 } else {
1366 ath9k_hw_resettxqueue(ah, qnum);
1367 }
1368
1369 return error;
1370}
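
/*
 * Sketch of a caller (editor addition, an assumption modeled on a typical WMM
 * configuration path, not code from this file): translate mac80211's
 * ieee80211_tx_queue_params into an ath9k_tx_queue_info and apply it with
 * ath_txq_update(). The TXOP conversion assumes mac80211's 32 us units.
 */
static int example_apply_wmm_params(struct ath_softc *sc, int qnum,
				    const struct ieee80211_tx_queue_params *params)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = params->aifs;
	qi.tqi_cwmin = params->cw_min;
	qi.tqi_cwmax = params->cw_max;
	qi.tqi_burstTime = params->txop * 32;	/* TXOP is in 32 us units */

	return ath_txq_update(sc, qnum, &qi);
}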
1371
1372int ath_cabq_update(struct ath_softc *sc)
1373{
1374 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001375 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301376 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301377
1378 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1379 /*
1380 * Ensure the readytime % is within the bounds.
1381 */
Sujith17d79042009-02-09 13:27:03 +05301382 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1383 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1384 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1385 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301386
Steve Brown9814f6b2011-02-07 17:10:39 -07001387 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301388 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301389 ath_txq_update(sc, qnum, &qi);
1390
1391 return 0;
1392}
1393
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001394static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1395{
1396 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1397 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1398}
1399
Felix Fietkaufce041b2011-05-19 12:20:25 +02001400static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1401 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301402 __releases(txq->axq_lock)
1403 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301404{
1405 struct ath_buf *bf, *lastbf;
1406 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001407 struct ath_tx_status ts;
1408
1409 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001410 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301411 INIT_LIST_HEAD(&bf_head);
1412
Felix Fietkaufce041b2011-05-19 12:20:25 +02001413 while (!list_empty(list)) {
1414 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301415
Felix Fietkaufce041b2011-05-19 12:20:25 +02001416 if (bf->bf_stale) {
1417 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301418
Felix Fietkaufce041b2011-05-19 12:20:25 +02001419 ath_tx_return_buffer(sc, bf);
1420 continue;
Sujithe8324352009-01-16 21:38:42 +05301421 }
1422
1423 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001424 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001425
Sujithe8324352009-01-16 21:38:42 +05301426 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001427 if (bf_is_ampdu_not_probing(bf))
1428 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301429
Felix Fietkaufce041b2011-05-19 12:20:25 +02001430 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301431 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001432 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1433 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301434 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001435 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001436 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001437 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001438}
1439
1440/*
1441 * Drain a given TX queue (could be Beacon or Data)
1442 *
1443 * This assumes output has been stopped and
1444 * we do not need to block ath_tx_tasklet.
1445 */
1446void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1447{
1448 spin_lock_bh(&txq->axq_lock);
1449 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1450 int idx = txq->txq_tailidx;
1451
1452 while (!list_empty(&txq->txq_fifo[idx])) {
1453 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1454 retry_tx);
1455
1456 INCR(idx, ATH_TXFIFO_DEPTH);
1457 }
1458 txq->txq_tailidx = idx;
1459 }
1460
1461 txq->axq_link = NULL;
1462 txq->axq_tx_inprogress = false;
1463 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001464
1465 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001466 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1467 ath_txq_drain_pending_buffers(sc, txq);
1468
1469 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301470}
1471
Felix Fietkau080e1a22010-12-05 20:17:53 +01001472bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301473{
Sujithcbe61d82009-02-09 13:27:12 +05301474 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001475 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301476 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001477 int i;
1478 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301479
1480 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001481 return true;
Sujith043a0402009-01-16 21:38:47 +05301482
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001483 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301484
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001485 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301486 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001487 if (!ATH_TXQ_SETUP(sc, i))
1488 continue;
1489
Felix Fietkau34d25812011-10-07 02:28:12 +02001490 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1491 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301492 }
1493
Felix Fietkau080e1a22010-12-05 20:17:53 +01001494 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001495 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301496
1497 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001498 if (!ATH_TXQ_SETUP(sc, i))
1499 continue;
1500
1501 /*
1502 * The caller will resume queues with ieee80211_wake_queues.
1503 * Mark the queue as not stopped to prevent ath_tx_complete
1504 * from waking the queue too early.
1505 */
1506 txq = &sc->tx.txq[i];
1507 txq->stopped = false;
1508 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301509 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001510
1511 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301512}
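
/*
 * Sketch (editor addition, an assumption modeled on a typical chip-reset
 * path, not code from this file): all hardware queues are drained before a
 * reset, and a failure to stop TX DMA is only logged, since ath_draintxq()
 * has already reclaimed the buffers.
 */
static void example_drain_before_reset(struct ath_softc *sc, bool retry_tx)
{
	if (!ath_drain_all_txq(sc, retry_tx))
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to stop TX DMA, resetting anyway\n");
}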
1513
Sujithe8324352009-01-16 21:38:42 +05301514void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1515{
1516 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1517 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1518}
1519
Ben Greear7755bad2011-01-18 17:30:00 -08001520/* For each axq_acq entry, for each tid, try to schedule packets
1521 * for transmission until the AMPDU depth reaches ATH_AGGR_MIN_QDEPTH.
1522 */
Sujithe8324352009-01-16 21:38:42 +05301523void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1524{
Ben Greear7755bad2011-01-18 17:30:00 -08001525 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1526 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301527
Felix Fietkau236de512011-09-03 01:40:25 +02001528 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001529 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301530 return;
1531
1532 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001533 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301534
Ben Greear7755bad2011-01-18 17:30:00 -08001535 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1536 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1537 list_del(&ac->list);
1538 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301539
Ben Greear7755bad2011-01-18 17:30:00 -08001540 while (!list_empty(&ac->tid_q)) {
1541 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1542 list);
1543 list_del(&tid->list);
1544 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301545
Ben Greear7755bad2011-01-18 17:30:00 -08001546 if (tid->paused)
1547 continue;
Sujithe8324352009-01-16 21:38:42 +05301548
Ben Greear7755bad2011-01-18 17:30:00 -08001549 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301550
Ben Greear7755bad2011-01-18 17:30:00 -08001551 /*
1552 * add tid to round-robin queue if more frames
1553 * are pending for the tid
1554 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001555 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001556 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301557
Ben Greear7755bad2011-01-18 17:30:00 -08001558 if (tid == last_tid ||
1559 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1560 break;
Sujithe8324352009-01-16 21:38:42 +05301561 }
Ben Greear7755bad2011-01-18 17:30:00 -08001562
1563 if (!list_empty(&ac->tid_q)) {
1564 if (!ac->sched) {
1565 ac->sched = true;
1566 list_add_tail(&ac->list, &txq->axq_acq);
1567 }
1568 }
1569
1570 if (ac == last_ac ||
1571 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1572 return;
Sujithe8324352009-01-16 21:38:42 +05301573 }
1574}
1575
Sujithe8324352009-01-16 21:38:42 +05301576/***********/
1577/* TX, DMA */
1578/***********/
1579
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581 * Insert a chain of ath_buf (descriptors) on a txq and
1582 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583 */
Sujith102e0572008-10-29 10:15:16 +05301584static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001585 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586{
Sujithcbe61d82009-02-09 13:27:12 +05301587 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001588 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001589 struct ath_buf *bf, *bf_last;
1590 bool puttxbuf = false;
1591 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301592
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001593 /*
1594 * Insert the frame on the outbound list and
1595 * pass it on to the hardware.
1596 */
1597
1598 if (list_empty(head))
1599 return;
1600
Felix Fietkaufce041b2011-05-19 12:20:25 +02001601 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001603 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604
Joe Perches226afe62010-12-02 19:12:37 -08001605 ath_dbg(common, ATH_DBG_QUEUE,
1606 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607
Felix Fietkaufce041b2011-05-19 12:20:25 +02001608 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1609 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001610 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001611 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001612 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001613 list_splice_tail_init(head, &txq->axq_q);
1614
Felix Fietkaufce041b2011-05-19 12:20:25 +02001615 if (txq->axq_link) {
1616 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001617 ath_dbg(common, ATH_DBG_XMIT,
1618 "link[%u] (%p)=%llx (%p)\n",
1619 txq->axq_qnum, txq->axq_link,
1620 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001621 } else if (!edma)
1622 puttxbuf = true;
1623
1624 txq->axq_link = bf_last->bf_desc;
1625 }
1626
1627 if (puttxbuf) {
1628 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1629 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1630 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1631 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1632 }
1633
1634 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001635 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001636 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001638
1639 if (!internal) {
1640 txq->axq_depth++;
1641 if (bf_is_ampdu_not_probing(bf))
1642 txq->axq_ampdu_depth++;
1643 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001644}
1645
Sujithe8324352009-01-16 21:38:42 +05301646static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001647 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301648{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001649 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001650 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001651 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301652
1653 /*
1654 * Do not queue to h/w when any of the following conditions is true:
1655 * - there are pending frames in software queue
1656 * - the TID is currently paused for ADDBA/BAR request
1657 * - seqno is not within block-ack window
1658 * - h/w queue depth exceeds low water mark
1659 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001660 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001661 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001662 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001663 /*
Sujithe8324352009-01-16 21:38:42 +05301664 * Add this frame to the software queue so it can be scheduled
1665 * later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001666 */
Ben Greearbda8add2011-01-09 23:11:48 -08001667 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001668 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001669 if (!txctl->an || !txctl->an->sleeping)
1670 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301671 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001672 }
1673
Felix Fietkau44f1d262011-08-28 00:32:25 +02001674 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1675 if (!bf)
1676 return;
1677
Felix Fietkau399c6482011-09-14 21:24:17 +02001678 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001679 INIT_LIST_HEAD(&bf_head);
1680 list_add(&bf->list, &bf_head);
1681
Sujithe8324352009-01-16 21:38:42 +05301682 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001683 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301684
1685 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001686 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301687 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001688 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001689 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301690}
1691
Felix Fietkau82b873a2010-11-11 03:18:37 +01001692static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001693 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001694{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001695 struct ath_frame_info *fi = get_frame_info(skb);
1696 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301697 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001698
Felix Fietkau44f1d262011-08-28 00:32:25 +02001699 bf = fi->bf;
1700 if (!bf)
1701 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1702
1703 if (!bf)
1704 return;
1705
1706 INIT_LIST_HEAD(&bf_head);
1707 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001708 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301709
1710 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001711 if (tid)
1712 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301713
Sujithd43f30152009-01-16 21:38:53 +05301714 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001715 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001716 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301717 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001718}
1719
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001720static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1721 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301722{
1723 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 struct ieee80211_sta *sta = tx_info->control.sta;
1725 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001726 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001727 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001728 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001729 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301730
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001731 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301732
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001733 if (sta)
1734 an = (struct ath_node *) sta->drv_priv;
1735
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001736 memset(fi, 0, sizeof(*fi));
1737 if (hw_key)
1738 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001739 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1740 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001741 else
1742 fi->keyix = ATH9K_TXKEYIX_INVALID;
1743 fi->keytype = keytype;
1744 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301745}
1746
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301747u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1748{
1749 struct ath_hw *ah = sc->sc_ah;
1750 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301751 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1752 (curchan->channelFlags & CHANNEL_5GHZ) &&
1753 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301754 return 0x3;
1755 else
1756 return chainmask;
1757}
1758
Felix Fietkau44f1d262011-08-28 00:32:25 +02001759/*
1760 * Assign a descriptor (and a sequence number if necessary),
1761 * and map the buffer for DMA. Frees the skb on error.
1762 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001763static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001764 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001765 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001766 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301767{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001768 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001769 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001770 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001771 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001773
1774 bf = ath_tx_get_buffer(sc);
1775 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001776 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001777 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001778 }
Sujithe8324352009-01-16 21:38:42 +05301779
Sujithe8324352009-01-16 21:38:42 +05301780 ATH_TXBUF_RESET(bf);
1781
Felix Fietkaufa05f872011-08-28 00:32:24 +02001782 if (tid) {
1783 seqno = tid->seq_next;
1784 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1785 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1786 bf->bf_state.seqno = seqno;
1787 }
1788
Sujithe8324352009-01-16 21:38:42 +05301789 bf->bf_mpdu = skb;
1790
Ben Greearc1739eb32010-10-14 12:45:29 -07001791 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1792 skb->len, DMA_TO_DEVICE);
1793 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301794 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001795 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001796 ath_err(ath9k_hw_common(sc->sc_ah),
1797 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001798 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001799 goto error;
Sujithe8324352009-01-16 21:38:42 +05301800 }
1801
Felix Fietkau56dc6332011-08-28 00:32:22 +02001802 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001803
1804 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001805
1806error:
1807 dev_kfree_skb_any(skb);
1808 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001809}
1810
1811/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001812static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001813 struct ath_tx_control *txctl)
1814{
Felix Fietkau04caf862010-11-14 15:20:12 +01001815 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001817 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001818 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001819 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301820
Sujithe8324352009-01-16 21:38:42 +05301821 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301822 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1823 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001824 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1825 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001826 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001827
Felix Fietkau066dae92010-11-07 14:59:39 +01001828 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001829 }
1830
1831 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001832 /*
1833 * Try aggregation if it's a unicast data frame
1834 * and the destination is HT capable.
1835 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001836 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301837 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001838 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1839 if (!bf)
1840 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001841
Felix Fietkau82b873a2010-11-11 03:18:37 +01001842 bf->bf_state.bfs_paprd = txctl->paprd;
1843
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301844 if (txctl->paprd)
1845 bf->bf_state.bfs_paprd_timestamp = jiffies;
1846
Felix Fietkau44f1d262011-08-28 00:32:25 +02001847 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301848 }
1849
Felix Fietkaufa05f872011-08-28 00:32:24 +02001850out:
Sujithe8324352009-01-16 21:38:42 +05301851 spin_unlock_bh(&txctl->txq->axq_lock);
1852}
1853
1854/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001855int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301856 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001857{
Felix Fietkau28d16702010-11-14 15:20:10 +01001858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1859 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001860 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001861 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001862 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001863 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001864 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001865 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001866 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001867
Ben Greeara9927ba2010-12-06 21:13:49 -08001868 /* NOTE: sta can be NULL according to net/mac80211.h */
1869 if (sta)
1870 txctl->an = (struct ath_node *)sta->drv_priv;
1871
Felix Fietkau04caf862010-11-14 15:20:12 +01001872 if (info->control.hw_key)
1873 frmlen += info->control.hw_key->icv_len;
1874
Felix Fietkau28d16702010-11-14 15:20:10 +01001875 /*
1876 * As a temporary workaround, assign seq# here; this will likely need
1877 * to be cleaned up to work better with Beacon transmission and virtual
1878 * BSSes.
1879 */
1880 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1881 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1882 sc->tx.seq_no += 0x10;
1883 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1884 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1885 }
1886
John W. Linville42cecc32011-09-19 15:42:31 -04001887 /* Add the padding after the header if this is not already done */
1888 padpos = ath9k_cmn_padpos(hdr->frame_control);
1889 padsize = padpos & 3;
1890 if (padsize && skb->len > padpos) {
1891 if (skb_headroom(skb) < padsize)
1892 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001893
John W. Linville42cecc32011-09-19 15:42:31 -04001894 skb_push(skb, padsize);
1895 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001896 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001897 }
1898
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001899 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1900 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1901 !ieee80211_is_data(hdr->frame_control))
1902 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1903
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001904 setup_frame_info(hw, skb, frmlen);
1905
1906 /*
1907 * At this point, the vif, hw_key and sta pointers in the tx control
1908 * info are no longer valid (overwritten by the ath_frame_info data).
1909 */
1910
Felix Fietkau066dae92010-11-07 14:59:39 +01001911 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001912 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001913 if (txq == sc->tx.txq_map[q] &&
1914 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001915 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001916 txq->stopped = 1;
1917 }
1918 spin_unlock_bh(&txq->axq_lock);
1919
Felix Fietkau44f1d262011-08-28 00:32:25 +02001920 ath_tx_start_dma(sc, skb, txctl);
1921 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001922}
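
/*
 * Sketch of a caller (editor addition, an assumption modeled on the mac80211
 * .tx path, not code from this file): build an ath_tx_control, pick the
 * per-AC queue from sc->tx.txq_map, and free the skb if ath_tx_start()
 * fails, as the comment above requires.
 */
static void example_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(txctl));
	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];

	if (ath_tx_start(hw, skb, &txctl)) {
		/* upon failure the caller must free the skb */
		dev_kfree_skb_any(skb);
	}
}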
1923
Sujithe8324352009-01-16 21:38:42 +05301924/*****************/
1925/* TX Completion */
1926/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001927
Sujithe8324352009-01-16 21:38:42 +05301928static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301929 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001930{
Sujithe8324352009-01-16 21:38:42 +05301931 struct ieee80211_hw *hw = sc->hw;
1932 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001933 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001934 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001935 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301936
Joe Perches226afe62010-12-02 19:12:37 -08001937 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301938
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301939 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301940 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301941
Felix Fietkau55797b12011-09-14 21:24:16 +02001942 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301943 /* Frame was ACKed */
1944 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301945
John W. Linville42cecc32011-09-19 15:42:31 -04001946 padpos = ath9k_cmn_padpos(hdr->frame_control);
1947 padsize = padpos & 3;
1948 if (padsize && skb->len > padpos + padsize) {
1949 /*
1950 * Remove MAC header padding before giving the frame back to
1951 * mac80211.
1952 */
1953 memmove(skb->data + padsize, skb->data, padpos);
1954 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301955 }
1956
Felix Fietkauc8e88682011-11-16 13:08:40 +01001957 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05301958 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001959 ath_dbg(common, ATH_DBG_PS,
1960 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301961 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1962 PS_WAIT_FOR_CAB |
1963 PS_WAIT_FOR_PSPOLL_DATA |
1964 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001965 }
1966
Felix Fietkau7545daf2011-01-24 19:23:16 +01001967 q = skb_get_queue_mapping(skb);
1968 if (txq == sc->tx.txq_map[q]) {
1969 spin_lock_bh(&txq->axq_lock);
1970 if (WARN_ON(--txq->pending_frames < 0))
1971 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001972
Felix Fietkau7545daf2011-01-24 19:23:16 +01001973 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1974 ieee80211_wake_queue(sc->hw, q);
1975 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001976 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001977 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001978 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001979
1980 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301981}
1982
1983static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001984 struct ath_txq *txq, struct list_head *bf_q,
1985 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301986{
1987 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001988 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301989 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301990 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301991
Sujithe8324352009-01-16 21:38:42 +05301992 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301993 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301994
Felix Fietkau55797b12011-09-14 21:24:16 +02001995 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301996 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301997
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001998 if (ts->ts_status & ATH9K_TXERR_FILT)
1999 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2000
Ben Greearc1739eb32010-10-14 12:45:29 -07002001 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002002 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002003
2004 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302005 if (time_after(jiffies,
2006 bf->bf_state.bfs_paprd_timestamp +
2007 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002008 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002009 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002010 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002011 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002012 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302013 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002014 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002015 /* At this point, skb (bf->bf_mpdu) has been consumed, so make sure we don't
2016 * accidentally reference it later.
2017 */
2018 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302019
2020 /*
2021 * Return the list of ath_buf's for this mpdu to the free queue
2022 */
2023 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2024 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2025 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2026}
2027
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002028static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2029 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002030 int txok)
Sujithc4288392008-11-18 09:09:30 +05302031{
Sujitha22be222009-03-30 15:28:36 +05302032 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302033 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302034 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002035 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002036 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302037 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302038
Sujith95e4acb2009-03-13 08:56:09 +05302039 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002040 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302041
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002042 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302043 WARN_ON(tx_rateindex >= hw->max_rates);
2044
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002045 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002046 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302047
Felix Fietkaub572d032010-11-14 15:20:07 +01002048 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002049 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302050 tx_info->status.ampdu_len = nframes;
2051 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002052
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002053 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002054 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002055 /*
2056 * If an underrun error is seen, treat it as an excessive
2057 * retry only if max frame trigger level has been reached
2058 * (2 KB for single stream, and 4 KB for dual stream).
2059 * Adjust the long retry as if the frame was tried
2060 * hw->max_rate_tries times to affect how rate control updates
2061 * PER for the failed rate.
2062 * In case of congestion on the bus, penalizing this type of
2063 * underrun should help the hardware actually transmit new frames
2064 * successfully by eventually preferring slower rates.
2065 * This itself should also alleviate congestion on the bus.
2066 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002067 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2068 ATH9K_TX_DELIM_UNDERRUN)) &&
2069 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002070 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002071 tx_info->status.rates[tx_rateindex].count =
2072 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302073 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302074
Felix Fietkau545750d2009-11-23 22:21:01 +01002075 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302076 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002077 tx_info->status.rates[i].idx = -1;
2078 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302079
Felix Fietkau78c46532010-06-25 01:26:16 +02002080 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302081}
2082
Felix Fietkaufce041b2011-05-19 12:20:25 +02002083static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2084 struct ath_tx_status *ts, struct ath_buf *bf,
2085 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302086 __releases(txq->axq_lock)
2087 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002088{
2089 int txok;
2090
2091 txq->axq_depth--;
2092 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2093 txq->axq_tx_inprogress = false;
2094 if (bf_is_ampdu_not_probing(bf))
2095 txq->axq_ampdu_depth--;
2096
2097 spin_unlock_bh(&txq->axq_lock);
2098
2099 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002100 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002101 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2102 } else
2103 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2104
2105 spin_lock_bh(&txq->axq_lock);
2106
2107 if (sc->sc_flags & SC_OP_TXAGGR)
2108 ath_txq_schedule(sc, txq);
2109}
2110
Sujithc4288392008-11-18 09:09:30 +05302111static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112{
Sujithcbe61d82009-02-09 13:27:12 +05302113 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002114 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002115 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2116 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302117 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002118 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002119 int status;
2120
Joe Perches226afe62010-12-02 19:12:37 -08002121 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2122 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2123 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124
Felix Fietkaufce041b2011-05-19 12:20:25 +02002125 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002126 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002127 if (work_pending(&sc->hw_reset_work))
2128 break;
2129
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002130 if (list_empty(&txq->axq_q)) {
2131 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002132 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002133 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134 break;
2135 }
2136 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2137
2138 /*
2139 * There is a race condition in which a BH gets scheduled
2140 * after sw writes TxE and before hw re-loads the last
2141 * descriptor to get the newly chained one.
2142 * Software must keep the last DONE descriptor as a
2143 * holding descriptor - software does so by marking
2144 * it with the STALE flag.
2145 */
2146 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302147 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002149 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002151
2152 bf = list_entry(bf_held->list.next, struct ath_buf,
2153 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154 }
2155
2156 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302157 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158
Felix Fietkau29bffa92010-03-29 20:14:23 -07002159 memset(&ts, 0, sizeof(ts));
2160 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002161 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002163
Ben Greear2dac4fb2011-01-09 23:11:45 -08002164 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002165
2166 /*
2167 * Remove ath_buf's of the same transmit unit from txq,
2168 * however leave the last descriptor back as the holding
2169 * descriptor for hw.
2170 */
Sujitha119cc42009-03-30 15:28:38 +05302171 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002172 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 if (!list_is_singular(&lastbf->list))
2174 list_cut_position(&bf_head,
2175 &txq->axq_q, lastbf->list.prev);
2176
Felix Fietkaufce041b2011-05-19 12:20:25 +02002177 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002178 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002179 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 }
Johannes Berge6a98542008-10-21 12:40:02 +02002181
Felix Fietkaufce041b2011-05-19 12:20:25 +02002182 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002184 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185}
2186
Sujith305fe472009-07-23 15:32:29 +05302187static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002188{
2189 struct ath_softc *sc = container_of(work, struct ath_softc,
2190 tx_complete_work.work);
2191 struct ath_txq *txq;
2192 int i;
2193 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002194#ifdef CONFIG_ATH9K_DEBUGFS
2195 sc->tx_complete_poll_work_seen++;
2196#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002197
2198 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2199 if (ATH_TXQ_SETUP(sc, i)) {
2200 txq = &sc->tx.txq[i];
2201 spin_lock_bh(&txq->axq_lock);
2202 if (txq->axq_depth) {
2203 if (txq->axq_tx_inprogress) {
2204 needreset = true;
2205 spin_unlock_bh(&txq->axq_lock);
2206 break;
2207 } else {
2208 txq->axq_tx_inprogress = true;
2209 }
2210 }
2211 spin_unlock_bh(&txq->axq_lock);
2212 }
2213
2214 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002215 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2216 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002217 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002218 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002219 }
2220
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002221 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002222 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2223}
2224
2225
Sujithe8324352009-01-16 21:38:42 +05302226
2227void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228{
Sujithe8324352009-01-16 21:38:42 +05302229 int i;
2230 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002231
Sujithe8324352009-01-16 21:38:42 +05302232 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002233
2234 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302235 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2236 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002237 }
2238}
2239
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002240void ath_tx_edma_tasklet(struct ath_softc *sc)
2241{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002242 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002243 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2244 struct ath_hw *ah = sc->sc_ah;
2245 struct ath_txq *txq;
2246 struct ath_buf *bf, *lastbf;
2247 struct list_head bf_head;
2248 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249
2250 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002251 if (work_pending(&sc->hw_reset_work))
2252 break;
2253
Felix Fietkaufce041b2011-05-19 12:20:25 +02002254 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002255 if (status == -EINPROGRESS)
2256 break;
2257 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002258 ath_dbg(common, ATH_DBG_XMIT,
2259 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002260 break;
2261 }
2262
2263 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002264 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002265 continue;
2266
Felix Fietkaufce041b2011-05-19 12:20:25 +02002267 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002268
2269 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002270
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2272 spin_unlock_bh(&txq->axq_lock);
2273 return;
2274 }
2275
2276 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2277 struct ath_buf, list);
2278 lastbf = bf->bf_lastbf;
2279
2280 INIT_LIST_HEAD(&bf_head);
2281 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2282 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002283
Felix Fietkaufce041b2011-05-19 12:20:25 +02002284 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2285 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002286
Felix Fietkaufce041b2011-05-19 12:20:25 +02002287 if (!list_empty(&txq->axq_q)) {
2288 struct list_head bf_q;
2289
2290 INIT_LIST_HEAD(&bf_q);
2291 txq->axq_link = NULL;
2292 list_splice_tail_init(&txq->axq_q, &bf_q);
2293 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2294 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002295 }
2296
Felix Fietkaufce041b2011-05-19 12:20:25 +02002297 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002298 spin_unlock_bh(&txq->axq_lock);
2299 }
2300}
2301
Sujithe8324352009-01-16 21:38:42 +05302302/*****************/
2303/* Init, Cleanup */
2304/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002305
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002306static int ath_txstatus_setup(struct ath_softc *sc, int size)
2307{
2308 struct ath_descdma *dd = &sc->txsdma;
2309 u8 txs_len = sc->sc_ah->caps.txs_len;
2310
2311 dd->dd_desc_len = size * txs_len;
2312 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2313 &dd->dd_desc_paddr, GFP_KERNEL);
2314 if (!dd->dd_desc)
2315 return -ENOMEM;
2316
2317 return 0;
2318}
2319
2320static int ath_tx_edma_init(struct ath_softc *sc)
2321{
2322 int err;
2323
2324 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2325 if (!err)
2326 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2327 sc->txsdma.dd_desc_paddr,
2328 ATH_TXSTATUS_RING_SIZE);
2329
2330 return err;
2331}
2332
2333static void ath_tx_edma_cleanup(struct ath_softc *sc)
2334{
2335 struct ath_descdma *dd = &sc->txsdma;
2336
2337 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2338 dd->dd_desc_paddr);
2339}
2340
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341int ath_tx_init(struct ath_softc *sc, int nbufs)
2342{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002343 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344 int error = 0;
2345
Sujith797fe5cb2009-03-30 15:28:45 +05302346 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347
Sujith797fe5cb2009-03-30 15:28:45 +05302348 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002349 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302350 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002351 ath_err(common,
2352 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302353 goto err;
2354 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355
Sujith797fe5cb2009-03-30 15:28:45 +05302356 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002357 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302358 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002359 ath_err(common,
2360 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302361 goto err;
2362 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002364 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2365
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002366 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2367 error = ath_tx_edma_init(sc);
2368 if (error)
2369 goto err;
2370 }
2371
Sujith797fe5cb2009-03-30 15:28:45 +05302372err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373 if (error != 0)
2374 ath_tx_cleanup(sc);
2375
2376 return error;
2377}
2378
Sujith797fe5cb2009-03-30 15:28:45 +05302379void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380{
Sujithb77f4832008-12-07 21:44:03 +05302381 if (sc->beacon.bdma.dd_desc_len != 0)
2382 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383
Sujithb77f4832008-12-07 21:44:03 +05302384 if (sc->tx.txdma.dd_desc_len != 0)
2385 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002386
2387 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2388 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389}
2390
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2392{
Sujithc5170162008-10-29 10:13:59 +05302393 struct ath_atx_tid *tid;
2394 struct ath_atx_ac *ac;
2395 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002396
Sujith8ee5afb2008-12-07 21:43:36 +05302397 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302398 tidno < WME_NUM_TID;
2399 tidno++, tid++) {
2400 tid->an = an;
2401 tid->tidno = tidno;
2402 tid->seq_start = tid->seq_next = 0;
2403 tid->baw_size = WME_MAX_BA;
2404 tid->baw_head = tid->baw_tail = 0;
2405 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302406 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302407 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002408 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302409 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302410 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302411 tid->state &= ~AGGR_ADDBA_COMPLETE;
2412 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302413 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002414
Sujith8ee5afb2008-12-07 21:43:36 +05302415 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302416 acno < WME_NUM_AC; acno++, ac++) {
2417 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002418 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302419 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420 }
2421}
2422
Sujithb5aa9bf2008-10-29 10:13:31 +05302423void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424{
Felix Fietkau2b409942010-07-07 19:42:08 +02002425 struct ath_atx_ac *ac;
2426 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002427 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002428 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302429
Felix Fietkau2b409942010-07-07 19:42:08 +02002430 for (tidno = 0, tid = &an->tid[tidno];
2431 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432
Felix Fietkau2b409942010-07-07 19:42:08 +02002433 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002434 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435
Felix Fietkau2b409942010-07-07 19:42:08 +02002436 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437
Felix Fietkau2b409942010-07-07 19:42:08 +02002438 if (tid->sched) {
2439 list_del(&tid->list);
2440 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002442
2443 if (ac->sched) {
2444 list_del(&ac->list);
2445 tid->ac->sched = false;
2446 }
2447
2448 ath_tid_drain(sc, txq, tid);
2449 tid->state &= ~AGGR_ADDBA_COMPLETE;
2450 tid->state &= ~AGGR_CLEANUP;
2451
2452 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002453 }
2454}