/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
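
/*
 * For example, a 10-symbol payload takes SYMBOL_TIME(10) = 40 us with the
 * regular guard interval and SYMBOL_TIME_HALFGI(10) = 36 us with the short
 * guard interval (3.6 us per symbol).
 */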

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
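
/*
 * bits_per_symbol[] above holds the data bits carried by one OFDM symbol for
 * a single spatial stream (MCS 0-7); the users below index it as
 * bits_per_symbol[rix % 8][width] and scale by the stream count, so the same
 * rows also cover the multi-stream MCS indices.
 */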

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
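
/*
 * Per-MCS frame length limits (in bytes) that keep a transmission within
 * roughly 4 ms, indexed as [mode][MCS 0-31]. Entries are capped at 65532
 * because the hardware length fields are 16 bits wide (see the comment in
 * ath_lookup_rate()).
 */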
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/
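
/*
 * Rough flow of the A-MPDU path in this file: ath_tx_form_aggr() pulls
 * frames from a TID queue and chains them into an aggregate within the
 * block-ack window, ath_tx_fill_desc()/ath_buf_set_rate() fill in the
 * hardware descriptors, and ath_tx_complete_aggr() walks the block-ack
 * status to complete or software-retry the individual subframes.
 */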

static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}
132
Sujithe8324352009-01-16 21:38:42 +0530133static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
134{
135 struct ath_atx_ac *ac = tid->ac;
136
137 if (tid->paused)
138 return;
139
140 if (tid->sched)
141 return;
142
143 tid->sched = true;
144 list_add_tail(&tid->list, &ac->tid_q);
145
146 if (ac->sched)
147 return;
148
149 ac->sched = true;
150 list_add_tail(&ac->list, &txq->axq_acq);
151}
152
Sujithe8324352009-01-16 21:38:42 +0530153static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
154{
Felix Fietkau066dae92010-11-07 14:59:39 +0100155 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530156
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200157 WARN_ON(!tid->paused);
158
Felix Fietkau23de5dc2011-12-19 16:45:54 +0100159 ath_txq_lock(sc, txq);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200160 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530161
Felix Fietkau56dc6332011-08-28 00:32:22 +0200162 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +0530163 goto unlock;
164
165 ath_tx_queue_tid(txq, tid);
166 ath_txq_schedule(sc, txq);
167unlock:
Felix Fietkau23de5dc2011-12-19 16:45:54 +0100168 ath_txq_unlock_complete(sc, txq);
Sujithe8324352009-01-16 21:38:42 +0530169}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
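
/*
 * The block-ack window is tracked in tid->tx_buf, a bitmap of
 * ATH_TID_MAX_BUFS entries based at tid->baw_head: ath_tx_addto_baw() marks
 * a sequence number as in flight, and ath_tx_update_baw() clears it and
 * slides seq_start/baw_head past any leading frames that have completed.
 */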
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
716/*
Sujithd43f30152009-01-16 21:38:53 +0530717 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530718 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530719 */
720static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530721 struct ath_buf *bf, u16 frmlen,
722 bool first_subfrm)
Sujithe8324352009-01-16 21:38:42 +0530723{
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530724#define FIRST_DESC_NDELIMS 60
Sujithe8324352009-01-16 21:38:42 +0530725 struct sk_buff *skb = bf->bf_mpdu;
726 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530727 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530728 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100729 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200730 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100731 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530732
733 /* Select standard number of delimiters based on frame length alone */
734 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
735
736 /*
737 * If encryption enabled, hardware requires some more padding between
738 * subframes.
739 * TODO - this could be improved to be dependent on the rate.
740 * The hardware can keep up at lower rates, but not higher rates
741 */
Rajkumar Manoharan4f6760b2011-07-01 18:37:33 +0530742 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
743 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
Sujithe8324352009-01-16 21:38:42 +0530744 ndelim += ATH_AGGR_ENCRYPTDELIM;
745
746 /*
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530747 * Add delimiter when using RTS/CTS with aggregation
748 * and non enterprise AR9003 card
749 */
Felix Fietkau34597312011-08-29 18:57:54 +0200750 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
751 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530752 ndelim = max(ndelim, FIRST_DESC_NDELIMS);
753
754 /*
Sujithe8324352009-01-16 21:38:42 +0530755 * Convert desired mpdu density from microeconds to bytes based
756 * on highest rate in rate series (i.e. first rate) to determine
757 * required minimum length for subframe. Take into account
758 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530759 *
Sujithe8324352009-01-16 21:38:42 +0530760 * If there is no mpdu density restriction, no further calculation
761 * is needed.
762 */
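	/*
	 * For example (20 MHz, full GI, MCS 7, one stream): a density of 8 us
	 * gives NUM_SYMBOLS_PER_USEC(8) = 2 symbols of 260 bits each, i.e. a
	 * 65-byte minimum subframe length; shorter subframes get padded with
	 * extra delimiters below.
	 */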

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
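/*
 * For example, a 1500-byte MPDU at MCS 7, 20 MHz, long GI (one stream) needs
 * nsymbols = ceil((1500 * 8 + 22) / 260) = 47 symbols, i.e. SYMBOL_TIME(47) =
 * 188 us of payload plus 36 us of training/signal fields.
 */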
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;

	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_txq_lock(sc, txq);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1226
Johannes Berg042ec452011-09-29 16:04:26 +02001227void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1228 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001229{
1230 struct ath_atx_tid *tid;
1231 struct ath_atx_ac *ac;
1232 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001233 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001234 int tidno;
1235
1236 for (tidno = 0, tid = &an->tid[tidno];
1237 tidno < WME_NUM_TID; tidno++, tid++) {
1238
1239 if (!tid->sched)
1240 continue;
1241
1242 ac = tid->ac;
1243 txq = ac->txq;
1244
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001245 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001246
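		/*
		 * Record whether the driver still has frames buffered in
		 * software for this TID; mac80211 is told via
		 * ieee80211_sta_set_buffered() below.
		 */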
Johannes Berg042ec452011-09-29 16:04:26 +02001247 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001248
1249 tid->sched = false;
1250 list_del(&tid->list);
1251
1252 if (ac->sched) {
1253 ac->sched = false;
1254 list_del(&ac->list);
1255 }
1256
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001257 ath_txq_unlock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001258
Johannes Berg042ec452011-09-29 16:04:26 +02001259 ieee80211_sta_set_buffered(sta, tidno, buffered);
1260 }
Felix Fietkau55195412011-04-17 23:28:09 +02001261}
1262
1263void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1264{
1265 struct ath_atx_tid *tid;
1266 struct ath_atx_ac *ac;
1267 struct ath_txq *txq;
1268 int tidno;
1269
1270 for (tidno = 0, tid = &an->tid[tidno];
1271 tidno < WME_NUM_TID; tidno++, tid++) {
1272
1273 ac = tid->ac;
1274 txq = ac->txq;
1275
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001276 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001277 ac->clear_ps_filter = true;
1278
Felix Fietkau56dc6332011-08-28 00:32:22 +02001279 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001280 ath_tx_queue_tid(txq, tid);
1281 ath_txq_schedule(sc, txq);
1282 }
1283
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001284 ath_txq_unlock_complete(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001285 }
1286}
1287
Sujithe8324352009-01-16 21:38:42 +05301288void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1289{
1290 struct ath_atx_tid *txtid;
1291 struct ath_node *an;
1292
1293 an = (struct ath_node *)sta->drv_priv;
1294
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301295 txtid = ATH_AN_2_TID(an, tid);
1296 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1297 txtid->state |= AGGR_ADDBA_COMPLETE;
1298 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1299 ath_tx_resume_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301300}
1301
Sujithe8324352009-01-16 21:38:42 +05301302/********************/
1303/* Queue Management */
1304/********************/
1305
Sujithe8324352009-01-16 21:38:42 +05301306static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1307 struct ath_txq *txq)
1308{
1309 struct ath_atx_ac *ac, *ac_tmp;
1310 struct ath_atx_tid *tid, *tid_tmp;
1311
1312 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1313 list_del(&ac->list);
1314 ac->sched = false;
1315 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1316 list_del(&tid->list);
1317 tid->sched = false;
1318 ath_tid_drain(sc, txq, tid);
1319 }
1320 }
1321}
1322
1323struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1324{
Sujithcbe61d82009-02-09 13:27:12 +05301325 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301326 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001327 static const int subtype_txq_to_hwq[] = {
1328 [WME_AC_BE] = ATH_TXQ_AC_BE,
1329 [WME_AC_BK] = ATH_TXQ_AC_BK,
1330 [WME_AC_VI] = ATH_TXQ_AC_VI,
1331 [WME_AC_VO] = ATH_TXQ_AC_VO,
1332 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001333 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301334
1335 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001336 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301337 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1338 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1339 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1340 qi.tqi_physCompBuf = 0;
1341
1342 /*
1343 * Enable interrupts only for EOL and DESC conditions.
1344 * We mark tx descriptors to receive a DESC interrupt
1345 * when a tx queue gets deep; otherwise we wait for the
1346 * EOL interrupt to reap descriptors. Note that this is done to
1347 * reduce interrupt load and this only defers reaping
1348 * descriptors, never transmitting frames. Aside from
1349 * reducing interrupts this also permits more concurrency.
1350 * The only potential downside is if the tx queue backs
1351 * up, in which case the top half of the kernel may back up
1352 * due to a lack of tx descriptors.
1353 *
1354 * The UAPSD queue is an exception, since we take a desc-
1355 * based intr on the EOSP frames.
1356 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001357 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
Felix Fietkauce8fdf62012-03-14 16:40:22 +01001358 qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001359 } else {
1360 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1361 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1362 else
1363 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1364 TXQ_FLAG_TXDESCINT_ENABLE;
1365 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001366 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1367 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301368 /*
1369 * NB: don't print a message, this happens
1370 * normally on parts with too few tx queues
1371 */
1372 return NULL;
1373 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001374 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1375 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301376
Ben Greear60f2d1d2011-01-09 23:11:52 -08001377 txq->axq_qnum = axq_qnum;
1378 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301379 txq->axq_link = NULL;
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001380 __skb_queue_head_init(&txq->complete_q);
Sujithe8324352009-01-16 21:38:42 +05301381 INIT_LIST_HEAD(&txq->axq_q);
1382 INIT_LIST_HEAD(&txq->axq_acq);
1383 spin_lock_init(&txq->axq_lock);
1384 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001385 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001386 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001387 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001388
1389 txq->txq_headidx = txq->txq_tailidx = 0;
1390 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1391 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301392 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001393 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301394}
1395
Sujithe8324352009-01-16 21:38:42 +05301396int ath_txq_update(struct ath_softc *sc, int qnum,
1397 struct ath9k_tx_queue_info *qinfo)
1398{
Sujithcbe61d82009-02-09 13:27:12 +05301399 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301400 int error = 0;
1401 struct ath9k_tx_queue_info qi;
1402
1403 if (qnum == sc->beacon.beaconq) {
1404 /*
1405 * XXX: for beacon queue, we just save the parameter.
1406 * It will be picked up by ath_beaconq_config when
1407 * it's necessary.
1408 */
1409 sc->beacon.beacon_qi = *qinfo;
1410 return 0;
1411 }
1412
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001413 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301414
1415 ath9k_hw_get_txq_props(ah, qnum, &qi);
1416 qi.tqi_aifs = qinfo->tqi_aifs;
1417 qi.tqi_cwmin = qinfo->tqi_cwmin;
1418 qi.tqi_cwmax = qinfo->tqi_cwmax;
1419 qi.tqi_burstTime = qinfo->tqi_burstTime;
1420 qi.tqi_readyTime = qinfo->tqi_readyTime;
1421
1422 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001423 ath_err(ath9k_hw_common(sc->sc_ah),
1424 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301425 error = -EIO;
1426 } else {
1427 ath9k_hw_resettxqueue(ah, qnum);
1428 }
1429
1430 return error;
1431}
1432
1433int ath_cabq_update(struct ath_softc *sc)
1434{
1435 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001436 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301437 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301438
1439 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1440 /*
1441 * Ensure the readytime % is within the bounds.
1442 */
Sujith17d79042009-02-09 13:27:03 +05301443 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1444 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1445 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1446 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301447
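	/*
	 * The CAB queue ready time is configured as cabqReadytime
	 * percent of the beacon interval.
	 */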
Steve Brown9814f6b2011-02-07 17:10:39 -07001448 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301449 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301450 ath_txq_update(sc, qnum, &qi);
1451
1452 return 0;
1453}
1454
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001455static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1456{
1457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1458 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1459}
1460
Felix Fietkaufce041b2011-05-19 12:20:25 +02001461static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1462 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301463{
1464 struct ath_buf *bf, *lastbf;
1465 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001466 struct ath_tx_status ts;
1467
1468 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001469 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301470 INIT_LIST_HEAD(&bf_head);
1471
Felix Fietkaufce041b2011-05-19 12:20:25 +02001472 while (!list_empty(list)) {
1473 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301474
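		/*
		 * A stale buffer is the holding descriptor left on the
		 * queue by ath_tx_processq(); it has already been completed,
		 * so just return it to the free list.
		 */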
Felix Fietkaufce041b2011-05-19 12:20:25 +02001475 if (bf->bf_stale) {
1476 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301477
Felix Fietkaufce041b2011-05-19 12:20:25 +02001478 ath_tx_return_buffer(sc, bf);
1479 continue;
Sujithe8324352009-01-16 21:38:42 +05301480 }
1481
1482 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001483 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001484
Sujithe8324352009-01-16 21:38:42 +05301485 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001486 if (bf_is_ampdu_not_probing(bf))
1487 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301488
1489 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001490 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1491 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301492 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001493 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001494 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001495}
1496
1497/*
1498 * Drain a given TX queue (could be Beacon or Data)
1499 *
1500 * This assumes output has been stopped and
1501 * we do not need to block ath_tx_tasklet.
1502 */
1503void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1504{
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001505 ath_txq_lock(sc, txq);
1506
Felix Fietkaufce041b2011-05-19 12:20:25 +02001507 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1508 int idx = txq->txq_tailidx;
1509
1510 while (!list_empty(&txq->txq_fifo[idx])) {
1511 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1512 retry_tx);
1513
1514 INCR(idx, ATH_TXFIFO_DEPTH);
1515 }
1516 txq->txq_tailidx = idx;
1517 }
1518
1519 txq->axq_link = NULL;
1520 txq->axq_tx_inprogress = false;
1521 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001522
1523 /* flush any pending frames if aggregation is enabled */
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301524 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
Felix Fietkaufce041b2011-05-19 12:20:25 +02001525 ath_txq_drain_pending_buffers(sc, txq);
1526
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001527 ath_txq_unlock_complete(sc, txq);
Sujithe8324352009-01-16 21:38:42 +05301528}
1529
Felix Fietkau080e1a22010-12-05 20:17:53 +01001530bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301531{
Sujithcbe61d82009-02-09 13:27:12 +05301532 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001533 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301534 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001535 int i;
1536 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301537
1538 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001539 return true;
Sujith043a0402009-01-16 21:38:47 +05301540
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001541 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301542
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001543 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301544 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001545 if (!ATH_TXQ_SETUP(sc, i))
1546 continue;
1547
Felix Fietkau34d25812011-10-07 02:28:12 +02001548 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1549 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301550 }
1551
Felix Fietkau080e1a22010-12-05 20:17:53 +01001552 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001553 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301554
1555 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001556 if (!ATH_TXQ_SETUP(sc, i))
1557 continue;
1558
1559 /*
1560 * The caller will resume queues with ieee80211_wake_queues.
1561 * Mark the queue as not stopped to prevent ath_tx_complete
1562 * from waking the queue too early.
1563 */
1564 txq = &sc->tx.txq[i];
1565 txq->stopped = false;
1566 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301567 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001568
1569 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301570}
1571
Sujithe8324352009-01-16 21:38:42 +05301572void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1573{
1574 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1575 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1576}
1577
Ben Greear7755bad2011-01-18 17:30:00 -08001578/* For each axq_acq entry and each of its tids, try to schedule packets
1579 * for transmission until axq_ampdu_depth reaches ATH_AGGR_MIN_QDEPTH.
1580 */
Sujithe8324352009-01-16 21:38:42 +05301581void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1582{
Ben Greear7755bad2011-01-18 17:30:00 -08001583 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1584 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301585
Felix Fietkau236de512011-09-03 01:40:25 +02001586 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001587 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301588 return;
1589
1590 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001591 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301592
Ben Greear7755bad2011-01-18 17:30:00 -08001593 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1594 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1595 list_del(&ac->list);
1596 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301597
Ben Greear7755bad2011-01-18 17:30:00 -08001598 while (!list_empty(&ac->tid_q)) {
1599 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1600 list);
1601 list_del(&tid->list);
1602 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301603
Ben Greear7755bad2011-01-18 17:30:00 -08001604 if (tid->paused)
1605 continue;
Sujithe8324352009-01-16 21:38:42 +05301606
Ben Greear7755bad2011-01-18 17:30:00 -08001607 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301608
Ben Greear7755bad2011-01-18 17:30:00 -08001609 /*
1610 * add tid to round-robin queue if more frames
1611 * are pending for the tid
1612 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001613 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001614 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301615
Ben Greear7755bad2011-01-18 17:30:00 -08001616 if (tid == last_tid ||
1617 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1618 break;
Sujithe8324352009-01-16 21:38:42 +05301619 }
Ben Greear7755bad2011-01-18 17:30:00 -08001620
Felix Fietkaub0477012011-12-14 22:08:05 +01001621 if (!list_empty(&ac->tid_q) && !ac->sched) {
1622 ac->sched = true;
1623 list_add_tail(&ac->list, &txq->axq_acq);
Ben Greear7755bad2011-01-18 17:30:00 -08001624 }
1625
1626 if (ac == last_ac ||
1627 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1628 return;
Sujithe8324352009-01-16 21:38:42 +05301629 }
1630}
1631
Sujithe8324352009-01-16 21:38:42 +05301632/***********/
1633/* TX, DMA */
1634/***********/
1635
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637 * Insert a chain of ath_buf (descriptors) on a txq and
1638 * assume the descriptors have already been chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001639 */
Sujith102e0572008-10-29 10:15:16 +05301640static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001641 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001642{
Sujithcbe61d82009-02-09 13:27:12 +05301643 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001644 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001645 struct ath_buf *bf, *bf_last;
1646 bool puttxbuf = false;
1647 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301648
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001649 /*
1650 * Insert the frame on the outbound list and
1651 * pass it on to the hardware.
1652 */
1653
1654 if (list_empty(head))
1655 return;
1656
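	/*
	 * EDMA hardware takes the frame list through a per-queue TX FIFO
	 * (ath9k_hw_puttxbuf); legacy hardware chains descriptors via
	 * axq_link and is kicked with ath9k_hw_txstart().
	 */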
Felix Fietkaufce041b2011-05-19 12:20:25 +02001657 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001658 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001659 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001660
Joe Perchesd2182b62011-12-15 14:55:53 -08001661 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
1662 txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001663
Felix Fietkaufce041b2011-05-19 12:20:25 +02001664 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1665 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001666 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001667 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001668 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001669 list_splice_tail_init(head, &txq->axq_q);
1670
Felix Fietkaufce041b2011-05-19 12:20:25 +02001671 if (txq->axq_link) {
1672 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001673 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
Joe Perches226afe62010-12-02 19:12:37 -08001674 txq->axq_qnum, txq->axq_link,
1675 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001676 } else if (!edma)
1677 puttxbuf = true;
1678
1679 txq->axq_link = bf_last->bf_desc;
1680 }
1681
1682 if (puttxbuf) {
1683 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1684 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001685 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
Felix Fietkaufce041b2011-05-19 12:20:25 +02001686 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1687 }
1688
1689 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001690 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001691 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001692 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001693
1694 if (!internal) {
1695 txq->axq_depth++;
1696 if (bf_is_ampdu_not_probing(bf))
1697 txq->axq_ampdu_depth++;
1698 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001699}
1700
Sujithe8324352009-01-16 21:38:42 +05301701static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001702 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301703{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001704 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001705 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001706 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301707
1708 /*
1709 * Do not queue to h/w when any of the following conditions is true:
1710 * - there are pending frames in the software queue
1711 * - the TID is currently paused for an ADDBA/BAR request
1712 * - seqno is not within the block-ack window
1713 * - h/w queue depth exceeds the low water mark
1714 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001715 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001716 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001717 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001718 /*
Sujithe8324352009-01-16 21:38:42 +05301719 * Add this frame to the software queue so it can be scheduled
1720 * for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001721 */
Ben Greearbda8add2011-01-09 23:11:48 -08001722 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001723 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001724 if (!txctl->an || !txctl->an->sleeping)
1725 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301726 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001727 }
1728
Felix Fietkau44f1d262011-08-28 00:32:25 +02001729 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1730 if (!bf)
1731 return;
1732
Felix Fietkau399c6482011-09-14 21:24:17 +02001733 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001734 INIT_LIST_HEAD(&bf_head);
1735 list_add(&bf->list, &bf_head);
1736
Sujithe8324352009-01-16 21:38:42 +05301737 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001738 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301739
1740 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001741 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301742 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001743 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001744 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301745}
1746
Felix Fietkau82b873a2010-11-11 03:18:37 +01001747static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001748 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001749{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001750 struct ath_frame_info *fi = get_frame_info(skb);
1751 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301752 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001753
Felix Fietkau44f1d262011-08-28 00:32:25 +02001754 bf = fi->bf;
1755 if (!bf)
1756 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1757
1758 if (!bf)
1759 return;
1760
1761 INIT_LIST_HEAD(&bf_head);
1762 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001763 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301764
Sujithd43f30152009-01-16 21:38:53 +05301765 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001766 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001767 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301768 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001769}
1770
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001771static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1772 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301773{
1774 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001775 struct ieee80211_sta *sta = tx_info->control.sta;
1776 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001777 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001778 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001779 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001780 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301781
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001782 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301783
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001784 if (sta)
1785 an = (struct ath_node *) sta->drv_priv;
1786
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001787 memset(fi, 0, sizeof(*fi));
1788 if (hw_key)
1789 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001790 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1791 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001792 else
1793 fi->keyix = ATH9K_TXKEYIX_INVALID;
1794 fi->keytype = keytype;
1795 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301796}
1797
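/*
 * Drop from three TX chains (0x7) to two (0x3) for rate codes below 0x90
 * on 5 GHz channels when the hardware advertises the APM capability;
 * otherwise keep the requested chainmask.
 */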
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301798u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1799{
1800 struct ath_hw *ah = sc->sc_ah;
1801 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301802 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1803 (curchan->channelFlags & CHANNEL_5GHZ) &&
1804 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301805 return 0x3;
1806 else
1807 return chainmask;
1808}
1809
Felix Fietkau44f1d262011-08-28 00:32:25 +02001810/*
1811 * Assign a descriptor (and a sequence number if necessary)
1812 * and map the buffer for DMA. Frees the skb on error.
1813 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001814static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001815 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001816 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001817 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301818{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001819 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001820 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001822 struct ath_buf *bf;
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301823 int fragno;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001824 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001825
1826 bf = ath_tx_get_buffer(sc);
1827 if (!bf) {
Joe Perchesd2182b62011-12-15 14:55:53 -08001828 ath_dbg(common, XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001829 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001830 }
Sujithe8324352009-01-16 21:38:42 +05301831
Sujithe8324352009-01-16 21:38:42 +05301832 ATH_TXBUF_RESET(bf);
1833
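	/*
	 * For aggregation-capable TIDs the sequence number is assigned by
	 * the driver from the per-TID counter so that it stays consistent
	 * with the block-ack window tracking.
	 */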
Felix Fietkaufa05f872011-08-28 00:32:24 +02001834 if (tid) {
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301835 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001836 seqno = tid->seq_next;
1837 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301838
1839 if (fragno)
1840 hdr->seq_ctrl |= cpu_to_le16(fragno);
1841
1842 if (!ieee80211_has_morefrags(hdr->frame_control))
1843 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1844
Felix Fietkaufa05f872011-08-28 00:32:24 +02001845 bf->bf_state.seqno = seqno;
1846 }
1847
Sujithe8324352009-01-16 21:38:42 +05301848 bf->bf_mpdu = skb;
1849
Ben Greearc1739eb32010-10-14 12:45:29 -07001850 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1851 skb->len, DMA_TO_DEVICE);
1852 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301853 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001854 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001855 ath_err(ath9k_hw_common(sc->sc_ah),
1856 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001857 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001858 goto error;
Sujithe8324352009-01-16 21:38:42 +05301859 }
1860
Felix Fietkau56dc6332011-08-28 00:32:22 +02001861 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001862
1863 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001864
1865error:
1866 dev_kfree_skb_any(skb);
1867 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001868}
1869
1870/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001871static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001872 struct ath_tx_control *txctl)
1873{
Felix Fietkau04caf862010-11-14 15:20:12 +01001874 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1875 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001876 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001877 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001878 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301879
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301880 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301881 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001882 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1883 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001884 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001885
Felix Fietkau066dae92010-11-07 14:59:39 +01001886 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001887 }
1888
1889 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001890 /*
1891 * Try aggregation if it's a unicast data frame
1892 * and the destination is HT capable.
1893 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001894 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301895 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001896 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1897 if (!bf)
Felix Fietkau3ad29522011-12-14 22:08:07 +01001898 return;
Felix Fietkau04caf862010-11-14 15:20:12 +01001899
Felix Fietkau82b873a2010-11-11 03:18:37 +01001900 bf->bf_state.bfs_paprd = txctl->paprd;
1901
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301902 if (txctl->paprd)
1903 bf->bf_state.bfs_paprd_timestamp = jiffies;
1904
Felix Fietkau44f1d262011-08-28 00:32:25 +02001905 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301906 }
Sujithe8324352009-01-16 21:38:42 +05301907}
1908
1909/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001910int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301911 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001912{
Felix Fietkau28d16702010-11-14 15:20:10 +01001913 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1914 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001915 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001916 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001917 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001918 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001919 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001920 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001921 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001922
Ben Greeara9927ba2010-12-06 21:13:49 -08001923 /* NOTE: sta can be NULL according to net/mac80211.h */
1924 if (sta)
1925 txctl->an = (struct ath_node *)sta->drv_priv;
1926
Felix Fietkau04caf862010-11-14 15:20:12 +01001927 if (info->control.hw_key)
1928 frmlen += info->control.hw_key->icv_len;
1929
Felix Fietkau28d16702010-11-14 15:20:10 +01001930 /*
1931 * As a temporary workaround, assign seq# here; this will likely need
1932 * to be cleaned up to work better with Beacon transmission and virtual
1933 * BSSes.
1934 */
1935 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1936 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1937 sc->tx.seq_no += 0x10;
1938 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1939 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1940 }
1941
John W. Linville42cecc32011-09-19 15:42:31 -04001942 /* Add the padding after the header if this is not already done */
1943 padpos = ath9k_cmn_padpos(hdr->frame_control);
1944 padsize = padpos & 3;
1945 if (padsize && skb->len > padpos) {
1946 if (skb_headroom(skb) < padsize)
1947 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001948
John W. Linville42cecc32011-09-19 15:42:31 -04001949 skb_push(skb, padsize);
1950 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001951 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001952 }
1953
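	/*
	 * Frames sent on non-AP(/AP_VLAN) interfaces and all non-data
	 * frames get IEEE80211_TX_CTL_CLEAR_PS_FILT so they are not held
	 * back by the destination's powersave filter.
	 */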
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001954 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1955 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1956 !ieee80211_is_data(hdr->frame_control))
1957 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1958
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001959 setup_frame_info(hw, skb, frmlen);
1960
1961 /*
1962 * At this point, the vif, hw_key and sta pointers in the tx control
1963 * info are no longer valid (overwritten by the ath_frame_info data).
1964 */
1965
Felix Fietkau066dae92010-11-07 14:59:39 +01001966 q = skb_get_queue_mapping(skb);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001967
1968 ath_txq_lock(sc, txq);
Felix Fietkau066dae92010-11-07 14:59:39 +01001969 if (txq == sc->tx.txq_map[q] &&
1970 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001971 ieee80211_stop_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00001972 txq->stopped = true;
Felix Fietkau97923b12010-06-12 00:33:55 -04001973 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001974
Felix Fietkau44f1d262011-08-28 00:32:25 +02001975 ath_tx_start_dma(sc, skb, txctl);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001976
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001977 ath_txq_unlock(sc, txq);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001978
Felix Fietkau44f1d262011-08-28 00:32:25 +02001979 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001980}
1981
Sujithe8324352009-01-16 21:38:42 +05301982/*****************/
1983/* TX Completion */
1984/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001985
Sujithe8324352009-01-16 21:38:42 +05301986static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301987 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001988{
Sujithe8324352009-01-16 21:38:42 +05301989 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001990 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001991 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001992 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301993
Joe Perchesd2182b62011-12-15 14:55:53 -08001994 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301995
Felix Fietkau55797b12011-09-14 21:24:16 +02001996 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301997 /* Frame was ACKed */
1998 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301999
John W. Linville42cecc32011-09-19 15:42:31 -04002000 padpos = ath9k_cmn_padpos(hdr->frame_control);
2001 padsize = padpos & 3;
2002 if (padsize && skb->len > padpos + padsize) {
2003 /*
2004 * Remove MAC header padding before giving the frame back to
2005 * mac80211.
2006 */
2007 memmove(skb->data + padsize, skb->data, padpos);
2008 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05302009 }
2010
Felix Fietkauc8e88682011-11-16 13:08:40 +01002011 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05302012 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perchesd2182b62011-12-15 14:55:53 -08002013 ath_dbg(common, PS,
Joe Perches226afe62010-12-02 19:12:37 -08002014 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05302015 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2016 PS_WAIT_FOR_CAB |
2017 PS_WAIT_FOR_PSPOLL_DATA |
2018 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03002019 }
2020
Felix Fietkau7545daf2011-01-24 19:23:16 +01002021 q = skb_get_queue_mapping(skb);
2022 if (txq == sc->tx.txq_map[q]) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01002023 if (WARN_ON(--txq->pending_frames < 0))
2024 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01002025
Felix Fietkau7545daf2011-01-24 19:23:16 +01002026 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2027 ieee80211_wake_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00002028 txq->stopped = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002029 }
Felix Fietkau97923b12010-06-12 00:33:55 -04002030 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002031
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002032 __skb_queue_tail(&txq->complete_q, skb);
Sujithe8324352009-01-16 21:38:42 +05302033}
2034
2035static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002036 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01002037 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05302038{
2039 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002040 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302041 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302042 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302043
Felix Fietkau55797b12011-09-14 21:24:16 +02002044 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302045 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302046
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002047 if (ts->ts_status & ATH9K_TXERR_FILT)
2048 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2049
Ben Greearc1739eb32010-10-14 12:45:29 -07002050 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002051 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002052
2053 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302054 if (time_after(jiffies,
2055 bf->bf_state.bfs_paprd_timestamp +
2056 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002057 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002058 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002059 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002060 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002061 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302062 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002063 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002064 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
2065 * accidentally reference it later.
2066 */
2067 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302068
2069 /*
2070 * Return the list of ath_bufs of this mpdu to the free queue
2071 */
2072 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2073 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2074 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2075}
2076
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002077static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2078 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002079 int txok)
Sujithc4288392008-11-18 09:09:30 +05302080{
Sujitha22be222009-03-30 15:28:36 +05302081 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302082 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302083 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002084 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002085 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302086 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302087
Sujith95e4acb2009-03-13 08:56:09 +05302088 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002089 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302090
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002091 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302092 WARN_ON(tx_rateindex >= hw->max_rates);
2093
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002094 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002095 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302096
Felix Fietkaub572d032010-11-14 15:20:07 +01002097 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002098 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302099 tx_info->status.ampdu_len = nframes;
2100 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002101
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002102 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002103 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002104 /*
2105 * If an underrun error is seen assume it as an excessive
2106 * retry only if max frame trigger level has been reached
2107 * (2 KB for single stream, and 4 KB for dual stream).
2108 * Adjust the long retry as if the frame was tried
2109 * hw->max_rate_tries times to affect how rate control updates
2110 * PER for the failed rate.
2111 * In case of congestion on the bus, penalizing this type of
2112 * underrun should help the hardware actually transmit new frames
2113 * successfully by eventually preferring slower rates.
2114 * This itself should also alleviate congestion on the bus.
2115 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002116 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2117 ATH9K_TX_DELIM_UNDERRUN)) &&
2118 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002119 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002120 tx_info->status.rates[tx_rateindex].count =
2121 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302122 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302123
Felix Fietkau545750d2009-11-23 22:21:01 +01002124 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302125 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002126 tx_info->status.rates[i].idx = -1;
2127 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302128
Felix Fietkau78c46532010-06-25 01:26:16 +02002129 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302130}
2131
Felix Fietkaufce041b2011-05-19 12:20:25 +02002132static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2133 struct ath_tx_status *ts, struct ath_buf *bf,
2134 struct list_head *bf_head)
2135{
2136 int txok;
2137
2138 txq->axq_depth--;
2139 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2140 txq->axq_tx_inprogress = false;
2141 if (bf_is_ampdu_not_probing(bf))
2142 txq->axq_ampdu_depth--;
2143
Felix Fietkaufce041b2011-05-19 12:20:25 +02002144 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002145 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002146 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002147 } else
2148 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2149
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05302150 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002151 ath_txq_schedule(sc, txq);
2152}
2153
Sujithc4288392008-11-18 09:09:30 +05302154static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155{
Sujithcbe61d82009-02-09 13:27:12 +05302156 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002157 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2159 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302160 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002161 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162 int status;
2163
Joe Perchesd2182b62011-12-15 14:55:53 -08002164 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
Joe Perches226afe62010-12-02 19:12:37 -08002165 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2166 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002168 ath_txq_lock(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002170 if (work_pending(&sc->hw_reset_work))
2171 break;
2172
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 if (list_empty(&txq->axq_q)) {
2174 txq->axq_link = NULL;
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05302175 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
Ben Greear082f6532011-01-09 23:11:47 -08002176 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 break;
2178 }
2179 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2180
2181 /*
2182 * There is a race condition in which a BH gets scheduled
2183 * after sw writes TxE and before hw reloads the last
2184 * descriptor to get the newly chained one.
2185 * Software must keep the last DONE descriptor as a
2186 * holding descriptor - software does so by marking
2187 * it with the STALE flag.
2188 */
2189 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302190 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002191 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002192 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002193 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002194
2195 bf = list_entry(bf_held->list.next, struct ath_buf,
2196 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002197 }
2198
2199 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302200 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002201
Felix Fietkau29bffa92010-03-29 20:14:23 -07002202 memset(&ts, 0, sizeof(ts));
2203 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002204 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002205 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002206
Ben Greear2dac4fb2011-01-09 23:11:45 -08002207 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002208
2209 /*
2210 * Remove the ath_bufs of the same transmit unit from txq,
2211 * but leave the last descriptor behind as the holding
2212 * descriptor for hw.
2213 */
Sujitha119cc42009-03-30 15:28:38 +05302214 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002216 if (!list_is_singular(&lastbf->list))
2217 list_cut_position(&bf_head,
2218 &txq->axq_q, lastbf->list.prev);
2219
Felix Fietkaufce041b2011-05-19 12:20:25 +02002220 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002221 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002222 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002223 }
Johannes Berge6a98542008-10-21 12:40:02 +02002224
Felix Fietkaufce041b2011-05-19 12:20:25 +02002225 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002226 }
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002227 ath_txq_unlock_complete(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228}
2229
Sujith305fe472009-07-23 15:32:29 +05302230static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002231{
2232 struct ath_softc *sc = container_of(work, struct ath_softc,
2233 tx_complete_work.work);
2234 struct ath_txq *txq;
2235 int i;
2236 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002237#ifdef CONFIG_ATH9K_DEBUGFS
2238 sc->tx_complete_poll_work_seen++;
2239#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002240
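	/*
	 * Watchdog: if a queue still holds frames and was already marked
	 * in-progress on the previous poll, assume TX DMA has hung and
	 * schedule a chip reset.
	 */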
2241 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2242 if (ATH_TXQ_SETUP(sc, i)) {
2243 txq = &sc->tx.txq[i];
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002244 ath_txq_lock(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002245 if (txq->axq_depth) {
2246 if (txq->axq_tx_inprogress) {
2247 needreset = true;
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002248 ath_txq_unlock(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002249 break;
2250 } else {
2251 txq->axq_tx_inprogress = true;
2252 }
2253 }
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002254 ath_txq_unlock_complete(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002255 }
2256
2257 if (needreset) {
Joe Perchesd2182b62011-12-15 14:55:53 -08002258 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
Joe Perches226afe62010-12-02 19:12:37 -08002259 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002260 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002261 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002262 }
2263
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002264 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002265 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2266}
2267
2268
Sujithe8324352009-01-16 21:38:42 +05302269
2270void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271{
Felix Fietkau239c7952012-03-14 16:40:26 +01002272 struct ath_hw *ah = sc->sc_ah;
2273 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
Sujithe8324352009-01-16 21:38:42 +05302274 int i;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002275
2276 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302277 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2278 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002279 }
2280}
2281
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282void ath_tx_edma_tasklet(struct ath_softc *sc)
2283{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002284 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002285 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2286 struct ath_hw *ah = sc->sc_ah;
2287 struct ath_txq *txq;
2288 struct ath_buf *bf, *lastbf;
2289 struct list_head bf_head;
2290 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291
2292 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002293 if (work_pending(&sc->hw_reset_work))
2294 break;
2295
Felix Fietkaufce041b2011-05-19 12:20:25 +02002296 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002297 if (status == -EINPROGRESS)
2298 break;
2299 if (status == -EIO) {
Joe Perchesd2182b62011-12-15 14:55:53 -08002300 ath_dbg(common, XMIT, "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002301 break;
2302 }
2303
Felix Fietkau4e0ad252012-02-27 19:58:42 +01002304 /* Process beacon completions separately */
2305 if (ts.qid == sc->beacon.beaconq) {
2306 sc->beacon.tx_processed = true;
2307 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002308 continue;
Felix Fietkau4e0ad252012-02-27 19:58:42 +01002309 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002310
Felix Fietkaufce041b2011-05-19 12:20:25 +02002311 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002312
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002313 ath_txq_lock(sc, txq);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002314
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002315 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002316 ath_txq_unlock(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002317 return;
2318 }
2319
2320 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2321 struct ath_buf, list);
2322 lastbf = bf->bf_lastbf;
2323
2324 INIT_LIST_HEAD(&bf_head);
2325 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2326 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002327
Felix Fietkaufce041b2011-05-19 12:20:25 +02002328 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2329 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002330
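			/*
			 * A TX FIFO slot has drained; if frames are waiting
			 * on the software axq_q overflow list, move them
			 * into the freed slot now.
			 */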
Felix Fietkaufce041b2011-05-19 12:20:25 +02002331 if (!list_empty(&txq->axq_q)) {
2332 struct list_head bf_q;
2333
2334 INIT_LIST_HEAD(&bf_q);
2335 txq->axq_link = NULL;
2336 list_splice_tail_init(&txq->axq_q, &bf_q);
2337 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2338 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002339 }
2340
Felix Fietkaufce041b2011-05-19 12:20:25 +02002341 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002342 ath_txq_unlock_complete(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002343 }
2344}
2345
Sujithe8324352009-01-16 21:38:42 +05302346/*****************/
2347/* Init, Cleanup */
2348/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002350static int ath_txstatus_setup(struct ath_softc *sc, int size)
2351{
2352 struct ath_descdma *dd = &sc->txsdma;
2353 u8 txs_len = sc->sc_ah->caps.txs_len;
2354
2355 dd->dd_desc_len = size * txs_len;
2356 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2357 &dd->dd_desc_paddr, GFP_KERNEL);
2358 if (!dd->dd_desc)
2359 return -ENOMEM;
2360
2361 return 0;
2362}
2363
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

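/* Free the DMA-coherent memory backing the TX status ring. */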
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

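/*
 * Allocate the TX and beacon descriptor rings and, on EDMA-capable
 * hardware, the TX status ring. On any failure, everything allocated
 * so far is released again via ath_tx_cleanup().
 */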
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

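/* Release the descriptor memory allocated by ath_tx_init(). */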
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

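/*
 * Initialise the per-node TID and access-category state used by the
 * aggregation code and attach each access category to its TX queue.
 */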
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

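/*
 * Unschedule and drain every TID belonging to this node, typically
 * before the station entry is torn down, and reset its aggregation
 * state.
 */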
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		ath_txq_unlock(sc, txq);
	}
}