/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

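/*
 * Queue a TID for transmission: add it to its access category's TID list
 * and, if needed, add the AC to the hardware queue's schedule list. Paused
 * or already-scheduled TIDs are left alone.
 */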
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

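/*
 * Drain the software queue of a TID: subframes that were already retried
 * are completed as failed (and removed from the block-ack window), anything
 * else is sent out as a normal (non-aggregate) frame.
 */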
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	spin_unlock_bh(&txq->axq_lock);
}

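/*
 * Mark a frame as completed within the block-ack window and advance the
 * window start past any leading slots that are no longer pending.
 */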
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

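/*
 * Record a newly transmitted sequence number in the block-ack window,
 * extending the window tail if the frame lies beyond it.
 */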
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

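/*
 * Walk the subframe chain of a (possibly aggregated) frame and count the
 * total number of subframes as well as those missing from the block-ack
 * bitmap reported in the tx status.
 */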
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

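/*
 * Handle tx completion of an aggregate: complete subframes that were acked
 * in the block-ack bitmap, queue software retries for the rest (up to
 * ATH_MAX_SW_RETRIES), and feed the status to rate control once.
 */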
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu,
							 retries);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0,
								    !flush);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

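/*
 * Derive the aggregate size limit for this TID from the rate series: the
 * limit is based on the lowest 4ms frame length among the selected rates,
 * capped by the peer's advertised maximum A-MPDU size.
 */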
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if it is a probe rate, avoid aggregating
	 * this packet at all.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

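/*
 * Build one A-MPDU from the TID's software queue: pull frames while the
 * block-ack window, the aggregate byte limit and the subframe limit allow
 * it, adding the per-subframe delimiter padding as we go.
 */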
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

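/*
 * Translate the mac80211 rate series of a frame into the hardware rate
 * table: rate codes, RTS/CTS protection flags, channel width/SGI flags
 * and per-rate packet durations.
 */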
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

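/*
 * Program the tx descriptors for every buffer in the chain, linking the
 * descriptors together and marking first/middle/last subframes when the
 * chain is an aggregate.
 */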
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

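/*
 * Form and queue aggregates from the TID's software queue until the
 * hardware queue holds enough A-MPDUs or the block-ack window closes.
 */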
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

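/*
 * Prepare a TID for aggregation after an ADDBA request: reset the
 * block-ack window state and report the starting sequence number. The
 * TID stays paused until the ADDBA exchange completes.
 */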
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

Sujithe8324352009-01-16 21:38:42 +05301233void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1234{
1235 struct ath_atx_tid *txtid;
1236 struct ath_node *an;
1237
1238 an = (struct ath_node *)sta->drv_priv;
1239
1240 if (sc->sc_flags & SC_OP_TXAGGR) {
1241 txtid = ATH_AN_2_TID(an, tid);
1242 txtid->baw_size =
1243 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1244 txtid->state |= AGGR_ADDBA_COMPLETE;
1245 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1246 ath_tx_resume_tid(sc, txtid);
1247 }
1248}
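/*
 * Illustrative, standalone sketch (not part of this file): how the
 * block-ack window size computed in ath_tx_aggr_resume() above scales
 * with the peer's A-MPDU factor. The minimum buffer size of 8 and the
 * example_ names are assumptions made for this example only.
 */
#include <stdio.h>

#define EXAMPLE_MIN_AMPDU_BUF 8		/* assumed minimum A-MPDU buffer size */

static unsigned int example_baw_size(unsigned int ampdu_factor)
{
	/* same left shift as the driver: minimum buffer << factor */
	return EXAMPLE_MIN_AMPDU_BUF << ampdu_factor;
}

int main(void)
{
	unsigned int factor;

	for (factor = 0; factor <= 3; factor++)	/* factor 3 yields a 64-frame window */
		printf("ampdu_factor %u -> baw_size %u\n",
		       factor, example_baw_size(factor));
	return 0;
}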
1249
Sujithe8324352009-01-16 21:38:42 +05301250/********************/
1251/* Queue Management */
1252/********************/
1253
Sujithe8324352009-01-16 21:38:42 +05301254static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1255 struct ath_txq *txq)
1256{
1257 struct ath_atx_ac *ac, *ac_tmp;
1258 struct ath_atx_tid *tid, *tid_tmp;
1259
1260 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1261 list_del(&ac->list);
1262 ac->sched = false;
1263 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1264 list_del(&tid->list);
1265 tid->sched = false;
1266 ath_tid_drain(sc, txq, tid);
1267 }
1268 }
1269}
1270
1271struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1272{
Sujithcbe61d82009-02-09 13:27:12 +05301273 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301274 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001275 static const int subtype_txq_to_hwq[] = {
1276 [WME_AC_BE] = ATH_TXQ_AC_BE,
1277 [WME_AC_BK] = ATH_TXQ_AC_BK,
1278 [WME_AC_VI] = ATH_TXQ_AC_VI,
1279 [WME_AC_VO] = ATH_TXQ_AC_VO,
1280 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001281 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301282
1283 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001284 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301285 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1286 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1287 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1288 qi.tqi_physCompBuf = 0;
1289
1290 /*
1291 * Enable interrupts only for EOL and DESC conditions.
 1292	 * We mark tx descriptors to receive a DESC interrupt
 1293	 * when a tx queue gets deep; otherwise we wait for the
 1294	 * EOL interrupt to reap descriptors. Note that this is done to
1295 * reduce interrupt load and this only defers reaping
1296 * descriptors, never transmitting frames. Aside from
1297 * reducing interrupts this also permits more concurrency.
 1298	 * The only potential downside is if the tx queue backs
 1299	 * up, in which case the top half of the kernel may back up
 1300	 * due to a lack of tx descriptors.
1301 *
1302 * The UAPSD queue is an exception, since we take a desc-
1303 * based intr on the EOSP frames.
1304 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001305 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1306 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1307 TXQ_FLAG_TXERRINT_ENABLE;
1308 } else {
1309 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1310 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1311 else
1312 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1313 TXQ_FLAG_TXDESCINT_ENABLE;
1314 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001315 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1316 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301317 /*
1318 * NB: don't print a message, this happens
1319 * normally on parts with too few tx queues
1320 */
1321 return NULL;
1322 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001323 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1324 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301325
Ben Greear60f2d1d2011-01-09 23:11:52 -08001326 txq->axq_qnum = axq_qnum;
1327 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301328 txq->axq_link = NULL;
1329 INIT_LIST_HEAD(&txq->axq_q);
1330 INIT_LIST_HEAD(&txq->axq_acq);
1331 spin_lock_init(&txq->axq_lock);
1332 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001333 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001334 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001335 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001336
1337 txq->txq_headidx = txq->txq_tailidx = 0;
1338 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1339 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301340 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001341 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301342}
1343
Sujithe8324352009-01-16 21:38:42 +05301344int ath_txq_update(struct ath_softc *sc, int qnum,
1345 struct ath9k_tx_queue_info *qinfo)
1346{
Sujithcbe61d82009-02-09 13:27:12 +05301347 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301348 int error = 0;
1349 struct ath9k_tx_queue_info qi;
1350
1351 if (qnum == sc->beacon.beaconq) {
1352 /*
1353 * XXX: for beacon queue, we just save the parameter.
1354 * It will be picked up by ath_beaconq_config when
1355 * it's necessary.
1356 */
1357 sc->beacon.beacon_qi = *qinfo;
1358 return 0;
1359 }
1360
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001361 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301362
1363 ath9k_hw_get_txq_props(ah, qnum, &qi);
1364 qi.tqi_aifs = qinfo->tqi_aifs;
1365 qi.tqi_cwmin = qinfo->tqi_cwmin;
1366 qi.tqi_cwmax = qinfo->tqi_cwmax;
1367 qi.tqi_burstTime = qinfo->tqi_burstTime;
1368 qi.tqi_readyTime = qinfo->tqi_readyTime;
1369
1370 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001371 ath_err(ath9k_hw_common(sc->sc_ah),
1372 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301373 error = -EIO;
1374 } else {
1375 ath9k_hw_resettxqueue(ah, qnum);
1376 }
1377
1378 return error;
1379}
1380
1381int ath_cabq_update(struct ath_softc *sc)
1382{
1383 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001384 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301385 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301386
1387 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1388 /*
1389 * Ensure the readytime % is within the bounds.
1390 */
Sujith17d79042009-02-09 13:27:03 +05301391 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1392 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1393 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1394 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301395
Steve Brown9814f6b2011-02-07 17:10:39 -07001396 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301397 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301398 ath_txq_update(sc, qnum, &qi);
1399
1400 return 0;
1401}
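/*
 * Illustrative, standalone sketch (not part of this file): the ready-time
 * calculation performed by ath_cabq_update() above, i.e. clamp the
 * configured percentage and take that share of the beacon interval.
 * The bound values and example_ names below are assumptions for the
 * example only.
 */
#include <stdio.h>

#define EXAMPLE_READY_LO 25	/* assumed lower bound, percent */
#define EXAMPLE_READY_HI 80	/* assumed upper bound, percent */

static unsigned int example_cabq_ready_time(unsigned int beacon_interval,
					    unsigned int ready_pct)
{
	if (ready_pct < EXAMPLE_READY_LO)
		ready_pct = EXAMPLE_READY_LO;
	else if (ready_pct > EXAMPLE_READY_HI)
		ready_pct = EXAMPLE_READY_HI;

	return (beacon_interval * ready_pct) / 100;
}

int main(void)
{
	/* a 100 TU beacon interval at 70 percent gives 70 TU of CAB ready time */
	printf("%u\n", example_cabq_ready_time(100, 70));
	return 0;
}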
1402
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001403static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1404{
1405 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1406 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1407}
1408
Felix Fietkaufce041b2011-05-19 12:20:25 +02001409static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1410 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301411 __releases(txq->axq_lock)
1412 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301413{
1414 struct ath_buf *bf, *lastbf;
1415 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001416 struct ath_tx_status ts;
1417
1418 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001419 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301420 INIT_LIST_HEAD(&bf_head);
1421
Felix Fietkaufce041b2011-05-19 12:20:25 +02001422 while (!list_empty(list)) {
1423 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301424
Felix Fietkaufce041b2011-05-19 12:20:25 +02001425 if (bf->bf_stale) {
1426 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301427
Felix Fietkaufce041b2011-05-19 12:20:25 +02001428 ath_tx_return_buffer(sc, bf);
1429 continue;
Sujithe8324352009-01-16 21:38:42 +05301430 }
1431
1432 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001433 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001434
Sujithe8324352009-01-16 21:38:42 +05301435 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001436 if (bf_is_ampdu_not_probing(bf))
1437 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301438
Felix Fietkaufce041b2011-05-19 12:20:25 +02001439 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301440 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001441 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1442 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301443 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001444 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001445 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001446 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001447}
1448
1449/*
1450 * Drain a given TX queue (could be Beacon or Data)
1451 *
1452 * This assumes output has been stopped and
1453 * we do not need to block ath_tx_tasklet.
1454 */
1455void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1456{
1457 spin_lock_bh(&txq->axq_lock);
1458 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1459 int idx = txq->txq_tailidx;
1460
1461 while (!list_empty(&txq->txq_fifo[idx])) {
1462 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1463 retry_tx);
1464
1465 INCR(idx, ATH_TXFIFO_DEPTH);
1466 }
1467 txq->txq_tailidx = idx;
1468 }
1469
1470 txq->axq_link = NULL;
1471 txq->axq_tx_inprogress = false;
1472 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001473
1474 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001475 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1476 ath_txq_drain_pending_buffers(sc, txq);
1477
1478 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301479}
1480
Felix Fietkau080e1a22010-12-05 20:17:53 +01001481bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301482{
Sujithcbe61d82009-02-09 13:27:12 +05301483 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001484 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301485 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001486 int i;
1487 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301488
1489 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001490 return true;
Sujith043a0402009-01-16 21:38:47 +05301491
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001492 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301493
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001494 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301495 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001496 if (!ATH_TXQ_SETUP(sc, i))
1497 continue;
1498
Felix Fietkau34d25812011-10-07 02:28:12 +02001499 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1500 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301501 }
1502
Felix Fietkau080e1a22010-12-05 20:17:53 +01001503 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001504 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301505
1506 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001507 if (!ATH_TXQ_SETUP(sc, i))
1508 continue;
1509
1510 /*
1511 * The caller will resume queues with ieee80211_wake_queues.
1512 * Mark the queue as not stopped to prevent ath_tx_complete
1513 * from waking the queue too early.
1514 */
1515 txq = &sc->tx.txq[i];
1516 txq->stopped = false;
1517 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301518 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001519
1520 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301521}
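/*
 * Illustrative, standalone sketch (not part of this file): the way
 * ath_drain_all_txq() above collects queues that still have pending
 * frames into a bitmask and reports them in a single message. The queue
 * count and the per-queue pending counts are invented for the example.
 */
#include <stdio.h>

#define EXAMPLE_NUM_QUEUES 10

int main(void)
{
	/* pretend these came from a per-queue "numtxpending" style query */
	unsigned int pending[EXAMPLE_NUM_QUEUES] = { 0, 0, 3, 0, 1, 0, 0, 0, 0, 0 };
	unsigned int npend = 0;
	int i;

	for (i = 0; i < EXAMPLE_NUM_QUEUES; i++)
		if (pending[i])
			npend |= 1u << i;	/* same idea as BIT(i) in the driver */

	if (npend)
		printf("Failed to stop TX DMA, queues=0x%03x!\n", npend);
	return 0;
}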
1522
Sujithe8324352009-01-16 21:38:42 +05301523void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1524{
1525 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1526 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1527}
1528
Ben Greear7755bad2011-01-18 17:30:00 -08001529/* For each axq_acq entry, for each tid, try to schedule packets
 1530 * for transmission until ampdu_depth has reached the minimum queue depth.
1531 */
Sujithe8324352009-01-16 21:38:42 +05301532void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1533{
Ben Greear7755bad2011-01-18 17:30:00 -08001534 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1535 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301536
Felix Fietkau236de512011-09-03 01:40:25 +02001537 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001538 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301539 return;
1540
1541 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001542 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301543
Ben Greear7755bad2011-01-18 17:30:00 -08001544 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1545 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1546 list_del(&ac->list);
1547 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301548
Ben Greear7755bad2011-01-18 17:30:00 -08001549 while (!list_empty(&ac->tid_q)) {
1550 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1551 list);
1552 list_del(&tid->list);
1553 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301554
Ben Greear7755bad2011-01-18 17:30:00 -08001555 if (tid->paused)
1556 continue;
Sujithe8324352009-01-16 21:38:42 +05301557
Ben Greear7755bad2011-01-18 17:30:00 -08001558 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301559
Ben Greear7755bad2011-01-18 17:30:00 -08001560 /*
 1561			 * add the tid back to the round-robin queue if more
 1562			 * frames are pending for it
1563 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001564 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001565 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301566
Ben Greear7755bad2011-01-18 17:30:00 -08001567 if (tid == last_tid ||
1568 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1569 break;
Sujithe8324352009-01-16 21:38:42 +05301570 }
Ben Greear7755bad2011-01-18 17:30:00 -08001571
1572 if (!list_empty(&ac->tid_q)) {
1573 if (!ac->sched) {
1574 ac->sched = true;
1575 list_add_tail(&ac->list, &txq->axq_acq);
1576 }
1577 }
1578
1579 if (ac == last_ac ||
1580 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1581 return;
Sujithe8324352009-01-16 21:38:42 +05301582 }
1583}
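/*
 * Illustrative, standalone sketch (not part of this file): the bounded
 * round-robin idea used by ath_txq_schedule() above -- service entries
 * from the head of the list, and stop once the entry that was last at
 * scheduling time has been visited or the hardware queue is deep enough.
 * The array, the depth limit and all numbers are invented for the example.
 */
#include <stdio.h>

#define EXAMPLE_MIN_QDEPTH 2

int main(void)
{
	int backlog[4] = { 3, 0, 1, 2 };	/* frames queued per TID */
	int hw_depth = 0;
	int first = 0, last = 3;		/* remembered before the pass */
	int i = first;

	for (;;) {
		if (backlog[i]) {
			backlog[i]--;		/* "schedule" one frame */
			hw_depth++;
		}
		if (i == last || hw_depth >= EXAMPLE_MIN_QDEPTH)
			break;			/* bound the work done per pass */
		i = (i + 1) % 4;
	}

	printf("hw_depth=%d, stopped at tid %d\n", hw_depth, i);
	return 0;
}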
1584
Sujithe8324352009-01-16 21:38:42 +05301585/***********/
1586/* TX, DMA */
1587/***********/
1588
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001590 * Insert a chain of ath_buf (descriptors) on a txq and
 1591 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001592 */
Sujith102e0572008-10-29 10:15:16 +05301593static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001594 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001595{
Sujithcbe61d82009-02-09 13:27:12 +05301596 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001597 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001598 struct ath_buf *bf, *bf_last;
1599 bool puttxbuf = false;
1600 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301601
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 /*
1603 * Insert the frame on the outbound list and
1604 * pass it on to the hardware.
1605 */
1606
1607 if (list_empty(head))
1608 return;
1609
Felix Fietkaufce041b2011-05-19 12:20:25 +02001610 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001611 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001612 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001613
Joe Perches226afe62010-12-02 19:12:37 -08001614 ath_dbg(common, ATH_DBG_QUEUE,
1615 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001616
Felix Fietkaufce041b2011-05-19 12:20:25 +02001617 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1618 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001619 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001620 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001621 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001622 list_splice_tail_init(head, &txq->axq_q);
1623
Felix Fietkaufce041b2011-05-19 12:20:25 +02001624 if (txq->axq_link) {
1625 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001626 ath_dbg(common, ATH_DBG_XMIT,
1627 "link[%u] (%p)=%llx (%p)\n",
1628 txq->axq_qnum, txq->axq_link,
1629 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001630 } else if (!edma)
1631 puttxbuf = true;
1632
1633 txq->axq_link = bf_last->bf_desc;
1634 }
1635
1636 if (puttxbuf) {
1637 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1638 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1639 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1640 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1641 }
1642
1643 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001644 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001645 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001646 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001647
1648 if (!internal) {
1649 txq->axq_depth++;
1650 if (bf_is_ampdu_not_probing(bf))
1651 txq->axq_ampdu_depth++;
1652 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001653}
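/*
 * Illustrative, standalone sketch (not part of this file): the two ways
 * ath_tx_txqaddbuf() above hands a chain to the hardware on non-EDMA
 * parts -- either patch the link field of the previously queued
 * descriptor, or, if the queue was idle, program the queue head
 * ("puttxbuf") directly. The types and names below are invented
 * stand-ins for the real descriptors and registers.
 */
#include <stdio.h>
#include <stdint.h>

struct example_desc {
	uint32_t daddr;		/* DMA address of this descriptor */
	uint32_t link;		/* DMA address of the next descriptor, 0 = end */
};

static uint32_t example_txdp;		/* stand-in for the TXDP register */
static struct example_desc *axq_link;	/* last queued descriptor, if any */

static void example_txqaddbuf(struct example_desc *first,
			      struct example_desc *last)
{
	if (axq_link)
		axq_link->link = first->daddr;	/* chain onto the current tail */
	else
		example_txdp = first->daddr;	/* queue was empty: set the head */

	axq_link = last;	/* remember the new tail for the next call */
}

int main(void)
{
	struct example_desc a = { 0x1000, 0 }, b = { 0x2000, 0 };

	example_txqaddbuf(&a, &a);	/* idle queue: TXDP <- 0x1000 */
	example_txqaddbuf(&b, &b);	/* busy queue: a.link <- 0x2000 */
	printf("txdp=0x%x a.link=0x%x\n",
	       (unsigned)example_txdp, (unsigned)a.link);
	return 0;
}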
1654
Sujithe8324352009-01-16 21:38:42 +05301655static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001656 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301657{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001658 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001659 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001660 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301661
1662 /*
1663 * Do not queue to h/w when any of the following conditions is true:
 1664	 * - there are pending frames in the software queue
 1665	 * - the TID is currently paused for an ADDBA/BAR request
 1666	 * - the seqno is not within the block-ack window
 1667	 * - the h/w queue depth exceeds the low water mark
1668 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001669 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001670 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001671 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001672 /*
Sujithe8324352009-01-16 21:38:42 +05301673		 * Add this frame to the software queue so it can be
 1674		 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001675 */
Ben Greearbda8add2011-01-09 23:11:48 -08001676 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001677 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001678 if (!txctl->an || !txctl->an->sleeping)
1679 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301680 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001681 }
1682
Felix Fietkau44f1d262011-08-28 00:32:25 +02001683 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1684 if (!bf)
1685 return;
1686
Felix Fietkau399c6482011-09-14 21:24:17 +02001687 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001688 INIT_LIST_HEAD(&bf_head);
1689 list_add(&bf->list, &bf_head);
1690
Sujithe8324352009-01-16 21:38:42 +05301691 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001692 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301693
1694 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001695 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301696 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001697 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001698 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301699}
1700
Felix Fietkau82b873a2010-11-11 03:18:37 +01001701static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001702 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001703{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001704 struct ath_frame_info *fi = get_frame_info(skb);
1705 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301706 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001707
Felix Fietkau44f1d262011-08-28 00:32:25 +02001708 bf = fi->bf;
1709 if (!bf)
1710 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1711
1712 if (!bf)
1713 return;
1714
1715 INIT_LIST_HEAD(&bf_head);
1716 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001717 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301718
1719 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001720 if (tid)
1721 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301722
Sujithd43f30152009-01-16 21:38:53 +05301723 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001724 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001725 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301726 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001727}
1728
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001729static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1730 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301731{
1732 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001733 struct ieee80211_sta *sta = tx_info->control.sta;
1734 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001735 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001736 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001737 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001738 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301739
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001740 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301741
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001742 if (sta)
1743 an = (struct ath_node *) sta->drv_priv;
1744
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001745 memset(fi, 0, sizeof(*fi));
1746 if (hw_key)
1747 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001748 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1749 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001750 else
1751 fi->keyix = ATH9K_TXKEYIX_INVALID;
1752 fi->keytype = keytype;
1753 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301754}
1755
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301756u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1757{
1758 struct ath_hw *ah = sc->sc_ah;
1759 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301760 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1761 (curchan->channelFlags & CHANNEL_5GHZ) &&
1762 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301763 return 0x3;
1764 else
1765 return chainmask;
1766}
1767
Felix Fietkau44f1d262011-08-28 00:32:25 +02001768/*
 1769 * Assign a descriptor (and a sequence number if necessary)
 1770 * and map the buffer for DMA. Frees the skb on error.
1771 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001773 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001774 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001775 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301776{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001777 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001778 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001779 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001780 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001781 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001782
1783 bf = ath_tx_get_buffer(sc);
1784 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001785 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001786 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001787 }
Sujithe8324352009-01-16 21:38:42 +05301788
Sujithe8324352009-01-16 21:38:42 +05301789 ATH_TXBUF_RESET(bf);
1790
Felix Fietkaufa05f872011-08-28 00:32:24 +02001791 if (tid) {
1792 seqno = tid->seq_next;
1793 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1794 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1795 bf->bf_state.seqno = seqno;
1796 }
1797
Sujithe8324352009-01-16 21:38:42 +05301798 bf->bf_mpdu = skb;
1799
Ben Greearc1739eb32010-10-14 12:45:29 -07001800 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1801 skb->len, DMA_TO_DEVICE);
1802 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301803 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001804 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001805 ath_err(ath9k_hw_common(sc->sc_ah),
1806 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001807 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001808 goto error;
Sujithe8324352009-01-16 21:38:42 +05301809 }
1810
Felix Fietkau56dc6332011-08-28 00:32:22 +02001811 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001812
1813 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001814
1815error:
1816 dev_kfree_skb_any(skb);
1817 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001818}
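/*
 * Illustrative, standalone sketch (not part of this file): how a per-TID
 * sequence number is folded into the 802.11 Sequence Control field the
 * way ath_tx_setup_buffer() above does it. The shift of 4 and the 12-bit
 * wrap reflect the frame format; the example_ names and the constants
 * here are assumptions made for the example.
 */
#include <stdio.h>

#define EXAMPLE_SEQ_SHIFT 4		/* sequence number starts at bit 4 */
#define EXAMPLE_SEQ_MAX   0x1000	/* 12-bit sequence number space */

static unsigned int example_seq_next;

static unsigned int example_assign_seqno(void)
{
	unsigned int seq_ctrl = example_seq_next << EXAMPLE_SEQ_SHIFT;

	/* post-increment, wrapping inside the 12-bit space */
	example_seq_next = (example_seq_next + 1) % EXAMPLE_SEQ_MAX;
	return seq_ctrl;
}

int main(void)
{
	example_seq_next = 4095;
	printf("0x%04x\n", example_assign_seqno());	/* 0xfff0 */
	printf("0x%04x\n", example_assign_seqno());	/* wrapped: 0x0000 */
	return 0;
}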
1819
1820/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001821static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001822 struct ath_tx_control *txctl)
1823{
Felix Fietkau04caf862010-11-14 15:20:12 +01001824 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1825 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001826 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001827 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001828 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301829
Sujithe8324352009-01-16 21:38:42 +05301830 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301831 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1832 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001833 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1834 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001835 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001836
Felix Fietkau066dae92010-11-07 14:59:39 +01001837 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001838 }
1839
1840 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001841 /*
1842 * Try aggregation if it's a unicast data frame
1843 * and the destination is HT capable.
1844 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001845 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301846 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001847 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1848 if (!bf)
1849 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001850
Felix Fietkau82b873a2010-11-11 03:18:37 +01001851 bf->bf_state.bfs_paprd = txctl->paprd;
1852
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301853 if (txctl->paprd)
1854 bf->bf_state.bfs_paprd_timestamp = jiffies;
1855
Felix Fietkau44f1d262011-08-28 00:32:25 +02001856 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301857 }
1858
Felix Fietkaufa05f872011-08-28 00:32:24 +02001859out:
Sujithe8324352009-01-16 21:38:42 +05301860 spin_unlock_bh(&txctl->txq->axq_lock);
1861}
1862
1863/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001864int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301865 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001866{
Felix Fietkau28d16702010-11-14 15:20:10 +01001867 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1868 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001869 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001870 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001871 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001872 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001873 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001874 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001875 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001876
Ben Greeara9927ba2010-12-06 21:13:49 -08001877 /* NOTE: sta can be NULL according to net/mac80211.h */
1878 if (sta)
1879 txctl->an = (struct ath_node *)sta->drv_priv;
1880
Felix Fietkau04caf862010-11-14 15:20:12 +01001881 if (info->control.hw_key)
1882 frmlen += info->control.hw_key->icv_len;
1883
Felix Fietkau28d16702010-11-14 15:20:10 +01001884 /*
1885 * As a temporary workaround, assign seq# here; this will likely need
1886 * to be cleaned up to work better with Beacon transmission and virtual
1887 * BSSes.
1888 */
1889 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1890 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1891 sc->tx.seq_no += 0x10;
1892 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1893 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1894 }
1895
John W. Linville42cecc32011-09-19 15:42:31 -04001896 /* Add the padding after the header if this is not already done */
1897 padpos = ath9k_cmn_padpos(hdr->frame_control);
1898 padsize = padpos & 3;
1899 if (padsize && skb->len > padpos) {
1900 if (skb_headroom(skb) < padsize)
1901 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001902
John W. Linville42cecc32011-09-19 15:42:31 -04001903 skb_push(skb, padsize);
1904 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001905 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001906 }
1907
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001908 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1909 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1910 !ieee80211_is_data(hdr->frame_control))
1911 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1912
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001913 setup_frame_info(hw, skb, frmlen);
1914
1915 /*
1916 * At this point, the vif, hw_key and sta pointers in the tx control
 1917	 * info are no longer valid (overwritten by the ath_frame_info data).
1918 */
1919
Felix Fietkau066dae92010-11-07 14:59:39 +01001920 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001921 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001922 if (txq == sc->tx.txq_map[q] &&
1923 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001924 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001925 txq->stopped = 1;
1926 }
1927 spin_unlock_bh(&txq->axq_lock);
1928
Felix Fietkau44f1d262011-08-28 00:32:25 +02001929 ath_tx_start_dma(sc, skb, txctl);
1930 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001931}
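/*
 * Illustrative, standalone sketch (not part of this file): the 4-byte
 * header alignment that ath_tx_start() above applies before DMA.
 * padsize bytes are opened up between the 802.11 header and the payload
 * by moving the header towards the (assumed available) headroom,
 * mirroring the skb_push() + memmove() pair in the driver. The 26-byte
 * QoS header length and buffer sizes are example values only.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 8 bytes of headroom, then a fake 26-byte header 'H' and payload 'P' */
	char buf[64];
	char *data = buf + 8;			/* plays the role of skb->data */
	int padpos = 26;			/* header length */
	int padsize = padpos & 3;		/* 26 & 3 == 2 pad bytes needed */

	memset(buf, 0, sizeof(buf));
	memset(data, 'H', padpos);
	memset(data + padpos, 'P', 16);

	if (padsize) {
		data -= padsize;		/* like skb_push(skb, padsize) */
		memmove(data, data + padsize, padpos);
	}

	printf("padsize=%d, header now at offset %ld\n",
	       padsize, (long)(data - buf));
	return 0;
}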
1932
Sujithe8324352009-01-16 21:38:42 +05301933/*****************/
1934/* TX Completion */
1935/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001936
Sujithe8324352009-01-16 21:38:42 +05301937static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301938 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001939{
Sujithe8324352009-01-16 21:38:42 +05301940 struct ieee80211_hw *hw = sc->hw;
1941 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001942 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001943 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001944 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301945
Joe Perches226afe62010-12-02 19:12:37 -08001946 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301947
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301948 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301949 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301950
Felix Fietkau55797b12011-09-14 21:24:16 +02001951 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301952 /* Frame was ACKed */
1953 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301954
John W. Linville42cecc32011-09-19 15:42:31 -04001955 padpos = ath9k_cmn_padpos(hdr->frame_control);
1956 padsize = padpos & 3;
1957 if (padsize && skb->len>padpos+padsize) {
1958 /*
1959 * Remove MAC header padding before giving the frame back to
1960 * mac80211.
1961 */
1962 memmove(skb->data + padsize, skb->data, padpos);
1963 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301964 }
1965
Felix Fietkauc8e88682011-11-16 13:08:40 +01001966 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05301967 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001968 ath_dbg(common, ATH_DBG_PS,
1969 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301970 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1971 PS_WAIT_FOR_CAB |
1972 PS_WAIT_FOR_PSPOLL_DATA |
1973 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001974 }
1975
Felix Fietkau7545daf2011-01-24 19:23:16 +01001976 q = skb_get_queue_mapping(skb);
1977 if (txq == sc->tx.txq_map[q]) {
1978 spin_lock_bh(&txq->axq_lock);
1979 if (WARN_ON(--txq->pending_frames < 0))
1980 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001981
Felix Fietkau7545daf2011-01-24 19:23:16 +01001982 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1983 ieee80211_wake_queue(sc->hw, q);
1984 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001985 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001986 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001987 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001988
1989 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301990}
1991
1992static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001993 struct ath_txq *txq, struct list_head *bf_q,
1994 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301995{
1996 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301998 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301999 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302000
Sujithe8324352009-01-16 21:38:42 +05302001 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302002 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302003
Felix Fietkau55797b12011-09-14 21:24:16 +02002004 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302005 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302006
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002007 if (ts->ts_status & ATH9K_TXERR_FILT)
2008 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2009
Ben Greearc1739eb32010-10-14 12:45:29 -07002010 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002011 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002012
2013 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302014 if (time_after(jiffies,
2015 bf->bf_state.bfs_paprd_timestamp +
2016 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002017 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002018 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002019 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002020 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002021 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302022 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002023 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002024	/* At this point, skb (bf->bf_mpdu) is consumed, so make sure we don't
2025 * accidentally reference it later.
2026 */
2027 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302028
2029 /*
 2030	 * Return the list of ath_buf structures for this mpdu to the free queue
2031 */
2032 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2033 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2034 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2035}
2036
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002037static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2038 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002039 int txok)
Sujithc4288392008-11-18 09:09:30 +05302040{
Sujitha22be222009-03-30 15:28:36 +05302041 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302042 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302043 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002044 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002045 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302046 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302047
Sujith95e4acb2009-03-13 08:56:09 +05302048 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002049 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302050
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002051 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302052 WARN_ON(tx_rateindex >= hw->max_rates);
2053
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002054 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002055 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302056
Felix Fietkaub572d032010-11-14 15:20:07 +01002057 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002058 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302059 tx_info->status.ampdu_len = nframes;
2060 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002061
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002062 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002063 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002064 /*
 2065		 * If an underrun error is seen, treat it as an excessive
 2066		 * retry only if the max frame trigger level has been reached
 2067		 * (2 KB for single stream, and 4 KB for dual stream).
 2068		 * Adjust the long retry as if the frame was tried
 2069		 * hw->max_rate_tries times to affect how rate control updates
 2070		 * PER for the failed rate.
 2071		 * In case of congestion on the bus, penalizing this type of
 2072		 * underrun should help the hardware actually transmit new frames
 2073		 * successfully by eventually preferring slower rates.
 2074		 * This should also alleviate congestion on the bus.
2075 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002076 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2077 ATH9K_TX_DELIM_UNDERRUN)) &&
2078 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002079 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002080 tx_info->status.rates[tx_rateindex].count =
2081 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302082 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302083
Felix Fietkau545750d2009-11-23 22:21:01 +01002084 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302085 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002086 tx_info->status.rates[i].idx = -1;
2087 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302088
Felix Fietkau78c46532010-06-25 01:26:16 +02002089 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302090}
2091
Felix Fietkaufce041b2011-05-19 12:20:25 +02002092static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2093 struct ath_tx_status *ts, struct ath_buf *bf,
2094 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302095 __releases(txq->axq_lock)
2096 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002097{
2098 int txok;
2099
2100 txq->axq_depth--;
2101 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2102 txq->axq_tx_inprogress = false;
2103 if (bf_is_ampdu_not_probing(bf))
2104 txq->axq_ampdu_depth--;
2105
2106 spin_unlock_bh(&txq->axq_lock);
2107
2108 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002109 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002110 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2111 } else
2112 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2113
2114 spin_lock_bh(&txq->axq_lock);
2115
2116 if (sc->sc_flags & SC_OP_TXAGGR)
2117 ath_txq_schedule(sc, txq);
2118}
2119
Sujithc4288392008-11-18 09:09:30 +05302120static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121{
Sujithcbe61d82009-02-09 13:27:12 +05302122 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002123 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2125 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302126 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002127 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 int status;
2129
Joe Perches226afe62010-12-02 19:12:37 -08002130 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2131 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2132 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133
Felix Fietkaufce041b2011-05-19 12:20:25 +02002134 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002136 if (work_pending(&sc->hw_reset_work))
2137 break;
2138
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 if (list_empty(&txq->axq_q)) {
2140 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002141 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002142 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143 break;
2144 }
2145 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2146
2147 /*
 2148		 * There is a race condition in which a BH gets scheduled
 2149		 * after sw writes TxE and before hw re-loads the last
2150 * descriptor to get the newly chained one.
2151 * Software must keep the last DONE descriptor as a
2152 * holding descriptor - software does so by marking
2153 * it with the STALE flag.
2154 */
2155 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302156 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002157 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002158 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002160
2161 bf = list_entry(bf_held->list.next, struct ath_buf,
2162 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 }
2164
2165 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302166 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167
Felix Fietkau29bffa92010-03-29 20:14:23 -07002168 memset(&ts, 0, sizeof(ts));
2169 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002172
Ben Greear2dac4fb2011-01-09 23:11:45 -08002173 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174
2175 /*
 2176		 * Remove the ath_bufs of the same transmit unit from txq;
 2177		 * however, leave the last descriptor back as the holding
2178 * descriptor for hw.
2179 */
Sujitha119cc42009-03-30 15:28:38 +05302180 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182 if (!list_is_singular(&lastbf->list))
2183 list_cut_position(&bf_head,
2184 &txq->axq_q, lastbf->list.prev);
2185
Felix Fietkaufce041b2011-05-19 12:20:25 +02002186 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002187 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002188 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002189 }
Johannes Berge6a98542008-10-21 12:40:02 +02002190
Felix Fietkaufce041b2011-05-19 12:20:25 +02002191 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002193 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194}
2195
Sujith305fe472009-07-23 15:32:29 +05302196static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002197{
2198 struct ath_softc *sc = container_of(work, struct ath_softc,
2199 tx_complete_work.work);
2200 struct ath_txq *txq;
2201 int i;
2202 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002203#ifdef CONFIG_ATH9K_DEBUGFS
2204 sc->tx_complete_poll_work_seen++;
2205#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002206
2207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2208 if (ATH_TXQ_SETUP(sc, i)) {
2209 txq = &sc->tx.txq[i];
2210 spin_lock_bh(&txq->axq_lock);
2211 if (txq->axq_depth) {
2212 if (txq->axq_tx_inprogress) {
2213 needreset = true;
2214 spin_unlock_bh(&txq->axq_lock);
2215 break;
2216 } else {
2217 txq->axq_tx_inprogress = true;
2218 }
2219 }
2220 spin_unlock_bh(&txq->axq_lock);
2221 }
2222
2223 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002224 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2225 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002226 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002227 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002228 }
2229
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002230 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002231 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2232}
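/*
 * Illustrative, standalone sketch (not part of this file): the two-pass
 * hang check used by ath_tx_complete_poll_work() above. A queue with
 * work is marked "in progress"; if it is still marked (i.e. nothing
 * completed in between) when the poll runs again, a reset is requested.
 * The poll driver loop and the queue state here are invented for the
 * example.
 */
#include <stdio.h>
#include <stdbool.h>

struct example_txq {
	int depth;		/* frames queued to hardware */
	bool tx_inprogress;	/* set by the previous poll pass */
};

static bool example_poll(struct example_txq *q)
{
	if (!q->depth)
		return false;		/* idle queue, nothing to watch */

	if (q->tx_inprogress)
		return true;		/* no completion since last poll: hung */

	q->tx_inprogress = true;	/* arm the check for the next poll */
	return false;
}

int main(void)
{
	struct example_txq q = { .depth = 5, .tx_inprogress = false };

	printf("first poll: %s\n", example_poll(&q) ? "hung" : "ok");
	/* depth never drained and no completion cleared the flag... */
	printf("second poll: %s\n", example_poll(&q) ? "hung" : "ok");
	return 0;
}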
2233
2234
Sujithe8324352009-01-16 21:38:42 +05302235
2236void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002237{
Sujithe8324352009-01-16 21:38:42 +05302238 int i;
2239 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002240
Sujithe8324352009-01-16 21:38:42 +05302241 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002242
2243 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302244 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2245 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002246 }
2247}
2248
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249void ath_tx_edma_tasklet(struct ath_softc *sc)
2250{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002251 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002252 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2253 struct ath_hw *ah = sc->sc_ah;
2254 struct ath_txq *txq;
2255 struct ath_buf *bf, *lastbf;
2256 struct list_head bf_head;
2257 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002258
2259 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002260 if (work_pending(&sc->hw_reset_work))
2261 break;
2262
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 if (status == -EINPROGRESS)
2265 break;
2266 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002267 ath_dbg(common, ATH_DBG_XMIT,
2268 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002269 break;
2270 }
2271
2272 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002273 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002274 continue;
2275
Felix Fietkaufce041b2011-05-19 12:20:25 +02002276 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002277
2278 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002279
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002280 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2281 spin_unlock_bh(&txq->axq_lock);
2282 return;
2283 }
2284
2285 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2286 struct ath_buf, list);
2287 lastbf = bf->bf_lastbf;
2288
2289 INIT_LIST_HEAD(&bf_head);
2290 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2291 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002292
Felix Fietkaufce041b2011-05-19 12:20:25 +02002293 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2294 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002295
Felix Fietkaufce041b2011-05-19 12:20:25 +02002296 if (!list_empty(&txq->axq_q)) {
2297 struct list_head bf_q;
2298
2299 INIT_LIST_HEAD(&bf_q);
2300 txq->axq_link = NULL;
2301 list_splice_tail_init(&txq->axq_q, &bf_q);
2302 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2303 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002304 }
2305
Felix Fietkaufce041b2011-05-19 12:20:25 +02002306 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002307 spin_unlock_bh(&txq->axq_lock);
2308 }
2309}
2310
Sujithe8324352009-01-16 21:38:42 +05302311/*****************/
2312/* Init, Cleanup */
2313/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002314
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002315static int ath_txstatus_setup(struct ath_softc *sc, int size)
2316{
2317 struct ath_descdma *dd = &sc->txsdma;
2318 u8 txs_len = sc->sc_ah->caps.txs_len;
2319
2320 dd->dd_desc_len = size * txs_len;
2321 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2322 &dd->dd_desc_paddr, GFP_KERNEL);
2323 if (!dd->dd_desc)
2324 return -ENOMEM;
2325
2326 return 0;
2327}
2328
2329static int ath_tx_edma_init(struct ath_softc *sc)
2330{
2331 int err;
2332
2333 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2334 if (!err)
2335 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2336 sc->txsdma.dd_desc_paddr,
2337 ATH_TXSTATUS_RING_SIZE);
2338
2339 return err;
2340}
2341
2342static void ath_tx_edma_cleanup(struct ath_softc *sc)
2343{
2344 struct ath_descdma *dd = &sc->txsdma;
2345
2346 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2347 dd->dd_desc_paddr);
2348}
2349
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002350int ath_tx_init(struct ath_softc *sc, int nbufs)
2351{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002352 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002353 int error = 0;
2354
Sujith797fe5cb2009-03-30 15:28:45 +05302355 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356
Sujith797fe5cb2009-03-30 15:28:45 +05302357 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002358 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302359 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002360 ath_err(common,
2361 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302362 goto err;
2363 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364
Sujith797fe5cb2009-03-30 15:28:45 +05302365 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002366 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302367 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002368 ath_err(common,
2369 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302370 goto err;
2371 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002373 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2374
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002375 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2376 error = ath_tx_edma_init(sc);
2377 if (error)
2378 goto err;
2379 }
2380
Sujith797fe5cb2009-03-30 15:28:45 +05302381err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382 if (error != 0)
2383 ath_tx_cleanup(sc);
2384
2385 return error;
2386}
2387
Sujith797fe5cb2009-03-30 15:28:45 +05302388void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389{
Sujithb77f4832008-12-07 21:44:03 +05302390 if (sc->beacon.bdma.dd_desc_len != 0)
2391 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Sujithb77f4832008-12-07 21:44:03 +05302393 if (sc->tx.txdma.dd_desc_len != 0)
2394 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002395
2396 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2397 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398}
2399
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002400void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2401{
Sujithc5170162008-10-29 10:13:59 +05302402 struct ath_atx_tid *tid;
2403 struct ath_atx_ac *ac;
2404 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405
Sujith8ee5afb2008-12-07 21:43:36 +05302406 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302407 tidno < WME_NUM_TID;
2408 tidno++, tid++) {
2409 tid->an = an;
2410 tid->tidno = tidno;
2411 tid->seq_start = tid->seq_next = 0;
2412 tid->baw_size = WME_MAX_BA;
2413 tid->baw_head = tid->baw_tail = 0;
2414 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302415 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302416 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002417 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302418 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302419 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302420 tid->state &= ~AGGR_ADDBA_COMPLETE;
2421 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302422 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423
Sujith8ee5afb2008-12-07 21:43:36 +05302424 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302425 acno < WME_NUM_AC; acno++, ac++) {
2426 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002427 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302428 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002429 }
2430}
2431
Sujithb5aa9bf2008-10-29 10:13:31 +05302432void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433{
Felix Fietkau2b409942010-07-07 19:42:08 +02002434 struct ath_atx_ac *ac;
2435 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002437 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302438
Felix Fietkau2b409942010-07-07 19:42:08 +02002439 for (tidno = 0, tid = &an->tid[tidno];
2440 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441
Felix Fietkau2b409942010-07-07 19:42:08 +02002442 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002443 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002444
Felix Fietkau2b409942010-07-07 19:42:08 +02002445 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446
Felix Fietkau2b409942010-07-07 19:42:08 +02002447 if (tid->sched) {
2448 list_del(&tid->list);
2449 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002450 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002451
2452 if (ac->sched) {
2453 list_del(&ac->list);
2454 tid->ac->sched = false;
2455 }
2456
2457 ath_tid_drain(sc, txq, tid);
2458 tid->state &= ~AGGR_ADDBA_COMPLETE;
2459 tid->state &= ~AGGR_CLEANUP;
2460
2461 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002462 }
2463}