/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

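/*
 * Data bits carried per OFDM symbol for each HT MCS (modulo 8, i.e. per
 * spatial stream), for 20 MHz and 40 MHz channels. Used below to derive
 * packet durations and minimum subframe lengths.
 */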
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

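/*
 * Largest frame length (in bytes) that still fits within a 4 ms transmit
 * duration, indexed by [MCS_HT20/HT20_SGI/HT40/HT40_SGI][MCS index].
 * Entries are capped at 65532 since the hardware length field is limited
 * to 16 bits.
 */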
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

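/*
 * Drop the txq lock, then hand any frames queued on txq->complete_q to
 * mac80211. Deferring ieee80211_tx_status() until after the lock is
 * released keeps the callback into mac80211 outside of axq_lock.
 */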
static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	ath_txq_lock(sc, txq);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}

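/*
 * The block-ack window is tracked in tid->tx_buf, a bitmap of outstanding
 * (not yet acknowledged) sequence numbers relative to tid->seq_start.
 * Example: if seq_start is 100 and seqno 100 completes, the head bit is
 * cleared and the window slides forward until the next still-pending
 * sequence number is reached.
 */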
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

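/*
 * Process the tx status of an aggregate: subframes acknowledged in the
 * block-ack bitmap are completed and the BAW advanced; unacknowledged
 * subframes are either software-retried (re-queued on the tid, preserving
 * order) or, once ATH_MAX_SW_RETRIES is exceeded, failed with a BAR sent
 * to move the receiver's window past them.
 */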
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit aggregate size by the minimum rate if the rate selected is
	 * not a probe rate; if the rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
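	/*
	 * Example (single spatial stream assumed): with a 4 us density at
	 * MCS 3, 40 MHz, full GI: nsymbols = NUM_SYMBOLS_PER_USEC(4) = 1,
	 * nsymbits = 216, so minlen = 27 bytes; shorter subframes get
	 * extra delimiters below.
	 */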

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

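/*
 * Build one aggregate from the head of tid->buf_q: subframes are pulled in
 * order as long as they fall within the block-ack window, the aggregate
 * stays under the rate-dependent byte limit and the subframe count stays
 * under min(half the BAW size, ATH_AMPDU_SUBFRAME_DEFAULT). Returns a
 * status indicating why aggregation stopped.
 */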
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
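/*
 * Example (one spatial stream assumed): MCS 7 at 20 MHz, full GI,
 * pktlen = 1500: nsymbits = 260, nbits = 1500 * 8 + 22 = 12022,
 * nsymbols = 47, duration = 47 * 4 us + 36 us of preamble/training
 * fields = 224 us.
 */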
909static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
910 int width, int half_gi, bool shortPreamble)
911{
912 u32 nbits, nsymbits, duration, nsymbols;
913 int streams;
914
915 /* find number of symbols: PLCP + data */
916 streams = HT_RC_2_STREAMS(rix);
917 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
918 nsymbits = bits_per_symbol[rix % 8][width] * streams;
919 nsymbols = (nbits + nsymbits - 1) / nsymbits;
920
921 if (!half_gi)
922 duration = SYMBOL_TIME(nsymbols);
923 else
924 duration = SYMBOL_TIME_HALFGI(nsymbols);
925
926 /* addup duration for legacy/ht training and signal fields */
927 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
928
929 return duration;
930}
931
Felix Fietkau493cf042011-09-14 21:24:22 +0200932static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
933 struct ath_tx_info *info, int len)
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200934{
935 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200936 struct sk_buff *skb;
937 struct ieee80211_tx_info *tx_info;
938 struct ieee80211_tx_rate *rates;
939 const struct ieee80211_rate *rate;
940 struct ieee80211_hdr *hdr;
Felix Fietkau493cf042011-09-14 21:24:22 +0200941 int i;
942 u8 rix = 0;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200943
944 skb = bf->bf_mpdu;
945 tx_info = IEEE80211_SKB_CB(skb);
946 rates = tx_info->control.rates;
947 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau493cf042011-09-14 21:24:22 +0200948
949 /* set dur_update_en for l-sig computation except for PS-Poll frames */
950 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200951
952 /*
953 * We check if Short Preamble is needed for the CTS rate by
954 * checking the BSS's global flag.
955 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
956 */
957 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
Felix Fietkau493cf042011-09-14 21:24:22 +0200958 info->rtscts_rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200959 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau493cf042011-09-14 21:24:22 +0200960 info->rtscts_rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200961
962 for (i = 0; i < 4; i++) {
963 bool is_40, is_sgi, is_sp;
964 int phy;
965
966 if (!rates[i].count || (rates[i].idx < 0))
967 continue;
968
969 rix = rates[i].idx;
Felix Fietkau493cf042011-09-14 21:24:22 +0200970 info->rates[i].Tries = rates[i].count;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200971
972 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200973 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
974 info->flags |= ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200975 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200976 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
977 info->flags |= ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200978 }
979
980 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau493cf042011-09-14 21:24:22 +0200981 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200982 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
Felix Fietkau493cf042011-09-14 21:24:22 +0200983 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200984
985 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
986 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
987 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
988
989 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
990 /* MCS rates */
Felix Fietkau493cf042011-09-14 21:24:22 +0200991 info->rates[i].Rate = rix | 0x80;
992 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
993 ah->txchainmask, info->rates[i].Rate);
994 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200995 is_40, is_sgi, is_sp);
996 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
Felix Fietkau493cf042011-09-14 21:24:22 +0200997 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200998 continue;
999 }
1000
1001 /* legacy rates */
1002 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1003 !(rate->flags & IEEE80211_RATE_ERP_G))
1004 phy = WLAN_RC_PHY_CCK;
1005 else
1006 phy = WLAN_RC_PHY_OFDM;
1007
1008 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
Felix Fietkau493cf042011-09-14 21:24:22 +02001009 info->rates[i].Rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001010 if (rate->hw_value_short) {
1011 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
Felix Fietkau493cf042011-09-14 21:24:22 +02001012 info->rates[i].Rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001013 } else {
1014 is_sp = false;
1015 }
1016
1017 if (bf->bf_state.bfs_paprd)
Felix Fietkau493cf042011-09-14 21:24:22 +02001018 info->rates[i].ChSel = ah->txchainmask;
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001019 else
Felix Fietkau493cf042011-09-14 21:24:22 +02001020 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1021 ah->txchainmask, info->rates[i].Rate);
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001022
Felix Fietkau493cf042011-09-14 21:24:22 +02001023 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001024 phy, rate->bitrate * 100, len, rix, is_sp);
1025 }
1026
1027 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1028 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau493cf042011-09-14 21:24:22 +02001029 info->flags &= ~ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001030
1031 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
Felix Fietkau493cf042011-09-14 21:24:22 +02001032 if (info->flags & ATH9K_TXDESC_RTSENA)
1033 info->flags &= ~ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +02001034}
1035
Felix Fietkau493cf042011-09-14 21:24:22 +02001036static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1037{
1038 struct ieee80211_hdr *hdr;
1039 enum ath9k_pkt_type htype;
1040 __le16 fc;
1041
1042 hdr = (struct ieee80211_hdr *)skb->data;
1043 fc = hdr->frame_control;
1044
1045 if (ieee80211_is_beacon(fc))
1046 htype = ATH9K_PKT_TYPE_BEACON;
1047 else if (ieee80211_is_probe_resp(fc))
1048 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1049 else if (ieee80211_is_atim(fc))
1050 htype = ATH9K_PKT_TYPE_ATIM;
1051 else if (ieee80211_is_pspoll(fc))
1052 htype = ATH9K_PKT_TYPE_PSPOLL;
1053 else
1054 htype = ATH9K_PKT_TYPE_NORMAL;
1055
1056 return htype;
1057}
1058
1059static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1060 struct ath_txq *txq, int len)
Felix Fietkau399c6482011-09-14 21:24:17 +02001061{
1062 struct ath_hw *ah = sc->sc_ah;
1063 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1064 struct ath_buf *bf_first = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001065 struct ath_tx_info info;
Felix Fietkau399c6482011-09-14 21:24:17 +02001066 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
Felix Fietkau399c6482011-09-14 21:24:17 +02001067
Felix Fietkau493cf042011-09-14 21:24:22 +02001068 memset(&info, 0, sizeof(info));
1069 info.is_first = true;
1070 info.is_last = true;
1071 info.txpower = MAX_RATE_POWER;
1072 info.qcu = txq->axq_qnum;
Felix Fietkau399c6482011-09-14 21:24:17 +02001073
Felix Fietkau493cf042011-09-14 21:24:22 +02001074 info.flags = ATH9K_TXDESC_INTREQ;
1075 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1076 info.flags |= ATH9K_TXDESC_NOACK;
1077 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1078 info.flags |= ATH9K_TXDESC_LDPC;
1079
1080 ath_buf_set_rate(sc, bf, &info, len);
1081
1082 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1083 info.flags |= ATH9K_TXDESC_CLRDMASK;
1084
1085 if (bf->bf_state.bfs_paprd)
1086 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
1087
Felix Fietkau399c6482011-09-14 21:24:17 +02001088
1089 while (bf) {
Felix Fietkau493cf042011-09-14 21:24:22 +02001090 struct sk_buff *skb = bf->bf_mpdu;
1091 struct ath_frame_info *fi = get_frame_info(skb);
1092
1093 info.type = get_hw_packet_type(skb);
Felix Fietkau399c6482011-09-14 21:24:17 +02001094 if (bf->bf_next)
Felix Fietkau493cf042011-09-14 21:24:22 +02001095 info.link = bf->bf_next->bf_daddr;
Felix Fietkau399c6482011-09-14 21:24:17 +02001096 else
Felix Fietkau493cf042011-09-14 21:24:22 +02001097 info.link = 0;
Felix Fietkau399c6482011-09-14 21:24:17 +02001098
John W. Linville42cecc32011-09-19 15:42:31 -04001099 info.buf_addr[0] = bf->bf_buf_addr;
1100 info.buf_len[0] = skb->len;
Felix Fietkau493cf042011-09-14 21:24:22 +02001101 info.pkt_len = fi->framelen;
1102 info.keyix = fi->keyix;
1103 info.keytype = fi->keytype;
1104
1105 if (aggr) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001106 if (bf == bf_first)
Felix Fietkau493cf042011-09-14 21:24:22 +02001107 info.aggr = AGGR_BUF_FIRST;
1108 else if (!bf->bf_next)
1109 info.aggr = AGGR_BUF_LAST;
1110 else
1111 info.aggr = AGGR_BUF_MIDDLE;
Felix Fietkau399c6482011-09-14 21:24:17 +02001112
Felix Fietkau493cf042011-09-14 21:24:22 +02001113 info.ndelim = bf->bf_state.ndelim;
1114 info.aggr_len = len;
Felix Fietkau399c6482011-09-14 21:24:17 +02001115 }
1116
Felix Fietkau493cf042011-09-14 21:24:22 +02001117 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
Felix Fietkau399c6482011-09-14 21:24:17 +02001118 bf = bf->bf_next;
1119 }
1120}
1121
Sujithe8324352009-01-16 21:38:42 +05301122static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1123 struct ath_atx_tid *tid)
1124{
Sujithd43f30152009-01-16 21:38:53 +05301125 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301126 enum ATH_AGGR_STATUS status;
Felix Fietkau399c6482011-09-14 21:24:17 +02001127 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +05301128 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001129 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +05301130
1131 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +02001132 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +05301133 return;
1134
1135 INIT_LIST_HEAD(&bf_q);
1136
Felix Fietkau269c44b2010-11-14 15:20:06 +01001137 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +05301138
1139 /*
Sujithd43f30152009-01-16 21:38:53 +05301140 * no frames picked up to be aggregated;
1141 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +05301142 */
1143 if (list_empty(&bf_q))
1144 break;
1145
1146 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +05301147 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Felix Fietkau399c6482011-09-14 21:24:17 +02001148 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +05301149
Felix Fietkau55195412011-04-17 23:28:09 +02001150 if (tid->ac->clear_ps_filter) {
1151 tid->ac->clear_ps_filter = false;
Felix Fietkau399c6482011-09-14 21:24:17 +02001152 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1153 } else {
1154 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
Felix Fietkau55195412011-04-17 23:28:09 +02001155 }
1156
Sujithd43f30152009-01-16 21:38:53 +05301157 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +01001158 if (bf == bf->bf_lastbf) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001159 aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1160 bf->bf_state.bf_type = BUF_AMPDU;
1161 } else {
1162 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +05301163 }
1164
Felix Fietkau493cf042011-09-14 21:24:22 +02001165 ath_tx_fill_desc(sc, bf, txq, aggr_len);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001166 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001167 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +05301168 status != ATH_AGGR_BAW_CLOSED);
1169}
1170
Felix Fietkau231c3a12010-09-20 19:35:28 +02001171int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1172 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301173{
1174 struct ath_atx_tid *txtid;
1175 struct ath_node *an;
1176
1177 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301178 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001179
1180 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1181 return -EAGAIN;
1182
Sujithf83da962009-07-23 15:32:37 +05301183 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001184 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001185 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkauf9437542011-12-14 22:08:08 +01001186 txtid->bar_index = -1;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001187
Felix Fietkau2ed72222011-01-10 17:05:49 -07001188 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1189 txtid->baw_head = txtid->baw_tail = 0;
1190
Felix Fietkau231c3a12010-09-20 19:35:28 +02001191 return 0;
Sujithe8324352009-01-16 21:38:42 +05301192}
1193
Sujithf83da962009-07-23 15:32:37 +05301194void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +05301195{
1196 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1197 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +01001198 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +05301199
1200 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +05301201 return;
Sujithe8324352009-01-16 21:38:42 +05301202
1203 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +05301204 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +05301205 return;
Sujithe8324352009-01-16 21:38:42 +05301206 }
1207
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001208 ath_txq_lock(sc, txq);
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001209 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +02001210
1211 /*
1212 * If frames are still being transmitted for this TID, they will be
1213 * cleaned up during tx completion. To prevent race conditions, this
1214 * TID can only be reused after all in-progress subframes have been
1215 * completed.
1216 */
1217 if (txtid->baw_head != txtid->baw_tail)
1218 txtid->state |= AGGR_CLEANUP;
1219 else
1220 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +05301221
Felix Fietkau90fa5392010-09-20 13:45:38 +02001222 ath_tx_flush_tid(sc, txtid);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001223 ath_txq_unlock_complete(sc, txq);
Sujithe8324352009-01-16 21:38:42 +05301224}
1225
Johannes Berg042ec452011-09-29 16:04:26 +02001226void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1227 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001228{
1229 struct ath_atx_tid *tid;
1230 struct ath_atx_ac *ac;
1231 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001232 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001233 int tidno;
1234
1235 for (tidno = 0, tid = &an->tid[tidno];
1236 tidno < WME_NUM_TID; tidno++, tid++) {
1237
1238 if (!tid->sched)
1239 continue;
1240
1241 ac = tid->ac;
1242 txq = ac->txq;
1243
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001244 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001245
Johannes Berg042ec452011-09-29 16:04:26 +02001246 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001247
1248 tid->sched = false;
1249 list_del(&tid->list);
1250
1251 if (ac->sched) {
1252 ac->sched = false;
1253 list_del(&ac->list);
1254 }
1255
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001256 ath_txq_unlock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001257
Johannes Berg042ec452011-09-29 16:04:26 +02001258 ieee80211_sta_set_buffered(sta, tidno, buffered);
1259 }
Felix Fietkau55195412011-04-17 23:28:09 +02001260}
1261
1262void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1263{
1264 struct ath_atx_tid *tid;
1265 struct ath_atx_ac *ac;
1266 struct ath_txq *txq;
1267 int tidno;
1268
1269 for (tidno = 0, tid = &an->tid[tidno];
1270 tidno < WME_NUM_TID; tidno++, tid++) {
1271
1272 ac = tid->ac;
1273 txq = ac->txq;
1274
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001275 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001276 ac->clear_ps_filter = true;
1277
Felix Fietkau56dc6332011-08-28 00:32:22 +02001278 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001279 ath_tx_queue_tid(txq, tid);
1280 ath_txq_schedule(sc, txq);
1281 }
1282
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001283 ath_txq_unlock_complete(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001284 }
1285}
1286
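/*
 * Complete the ADDBA handshake: derive the block-ack window size from
 * the station's A-MPDU factor and resume transmission on the TID.
 */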
Sujithe8324352009-01-16 21:38:42 +05301287void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1288{
1289 struct ath_atx_tid *txtid;
1290 struct ath_node *an;
1291
1292 an = (struct ath_node *)sta->drv_priv;
1293
1294 if (sc->sc_flags & SC_OP_TXAGGR) {
1295 txtid = ATH_AN_2_TID(an, tid);
1296 txtid->baw_size =
1297 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1298 txtid->state |= AGGR_ADDBA_COMPLETE;
1299 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1300 ath_tx_resume_tid(sc, txtid);
1301 }
1302}
1303
Sujithe8324352009-01-16 21:38:42 +05301304/********************/
1305/* Queue Management */
1306/********************/
1307
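/*
 * Drop all software-queued frames of every AC/TID scheduled on this
 * hardware queue.
 */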
Sujithe8324352009-01-16 21:38:42 +05301308static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1309 struct ath_txq *txq)
1310{
1311 struct ath_atx_ac *ac, *ac_tmp;
1312 struct ath_atx_tid *tid, *tid_tmp;
1313
1314 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1315 list_del(&ac->list);
1316 ac->sched = false;
1317 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1318 list_del(&tid->list);
1319 tid->sched = false;
1320 ath_tid_drain(sc, txq, tid);
1321 }
1322 }
1323}
1324
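/*
 * Allocate a hardware tx queue of the requested type and initialize
 * the corresponding driver-side ath_txq state.
 */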
1325struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1326{
Sujithcbe61d82009-02-09 13:27:12 +05301327 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301328 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001329 static const int subtype_txq_to_hwq[] = {
1330 [WME_AC_BE] = ATH_TXQ_AC_BE,
1331 [WME_AC_BK] = ATH_TXQ_AC_BK,
1332 [WME_AC_VI] = ATH_TXQ_AC_VI,
1333 [WME_AC_VO] = ATH_TXQ_AC_VO,
1334 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001335 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301336
1337 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001338 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301339 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1340 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1341 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1342 qi.tqi_physCompBuf = 0;
1343
1344 /*
1345 * Enable interrupts only for EOL and DESC conditions.
1346 * We mark tx descriptors to receive a DESC interrupt
1347 * when a tx queue gets deep; otherwise we wait for the
1348 * EOL interrupt to reap descriptors. Note that this is done
1349 * to reduce interrupt load, and it only defers reaping
1350 * descriptors, never transmitting frames. Aside from
1351 * reducing interrupts this also permits more concurrency.
1352 * The only potential downside is if the tx queue backs
1353 * up, in which case the top half of the kernel may back up
1354 * due to a lack of tx descriptors.
1355 *
1356 * The UAPSD queue is an exception, since we take a desc-
1357 * based intr on the EOSP frames.
1358 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001359 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1360 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1361 TXQ_FLAG_TXERRINT_ENABLE;
1362 } else {
1363 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1364 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1365 else
1366 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1367 TXQ_FLAG_TXDESCINT_ENABLE;
1368 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001369 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1370 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301371 /*
1372 * NB: don't print a message, this happens
1373 * normally on parts with too few tx queues
1374 */
1375 return NULL;
1376 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001377 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1378 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301379
Ben Greear60f2d1d2011-01-09 23:11:52 -08001380 txq->axq_qnum = axq_qnum;
1381 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301382 txq->axq_link = NULL;
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001383 __skb_queue_head_init(&txq->complete_q);
Sujithe8324352009-01-16 21:38:42 +05301384 INIT_LIST_HEAD(&txq->axq_q);
1385 INIT_LIST_HEAD(&txq->axq_acq);
1386 spin_lock_init(&txq->axq_lock);
1387 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001388 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001389 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001390 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001391
1392 txq->txq_headidx = txq->txq_tailidx = 0;
1393 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1394 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301395 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001396 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301397}
1398
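/*
 * Apply updated queue parameters to a hardware tx queue; parameters
 * for the beacon queue are only cached here.
 */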
Sujithe8324352009-01-16 21:38:42 +05301399int ath_txq_update(struct ath_softc *sc, int qnum,
1400 struct ath9k_tx_queue_info *qinfo)
1401{
Sujithcbe61d82009-02-09 13:27:12 +05301402 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301403 int error = 0;
1404 struct ath9k_tx_queue_info qi;
1405
1406 if (qnum == sc->beacon.beaconq) {
1407 /*
1408 * XXX: for beacon queue, we just save the parameter.
1409 * It will be picked up by ath_beaconq_config when
1410 * it's necessary.
1411 */
1412 sc->beacon.beacon_qi = *qinfo;
1413 return 0;
1414 }
1415
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001416 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301417
1418 ath9k_hw_get_txq_props(ah, qnum, &qi);
1419 qi.tqi_aifs = qinfo->tqi_aifs;
1420 qi.tqi_cwmin = qinfo->tqi_cwmin;
1421 qi.tqi_cwmax = qinfo->tqi_cwmax;
1422 qi.tqi_burstTime = qinfo->tqi_burstTime;
1423 qi.tqi_readyTime = qinfo->tqi_readyTime;
1424
1425 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001426 ath_err(ath9k_hw_common(sc->sc_ah),
1427 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301428 error = -EIO;
1429 } else {
1430 ath9k_hw_resettxqueue(ah, qnum);
1431 }
1432
1433 return error;
1434}
1435
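/*
 * Recalculate the CAB (content-after-beacon) queue ready time as a
 * percentage of the beacon interval.
 */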
1436int ath_cabq_update(struct ath_softc *sc)
1437{
1438 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001439 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301440 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301441
1442 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1443 /*
1444 * Ensure the readytime % is within the bounds.
1445 */
Sujith17d79042009-02-09 13:27:03 +05301446 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1447 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1448 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1449 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301450
Steve Brown9814f6b2011-02-07 17:10:39 -07001451 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301452 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301453 ath_txq_update(sc, qnum, &qi);
1454
1455 return 0;
1456}
1457
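/* True for A-MPDU buffers that are not rate-control probe frames. */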
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001458static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1459{
1460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1461 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1462}
1463
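/*
 * Flush every buffer on the given descriptor list, completing frames
 * with a flush status and returning stale holding buffers to the
 * free list.
 */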
Felix Fietkaufce041b2011-05-19 12:20:25 +02001464static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1465 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301466{
1467 struct ath_buf *bf, *lastbf;
1468 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001469 struct ath_tx_status ts;
1470
1471 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001472 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301473 INIT_LIST_HEAD(&bf_head);
1474
Felix Fietkaufce041b2011-05-19 12:20:25 +02001475 while (!list_empty(list)) {
1476 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301477
Felix Fietkaufce041b2011-05-19 12:20:25 +02001478 if (bf->bf_stale) {
1479 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301480
Felix Fietkaufce041b2011-05-19 12:20:25 +02001481 ath_tx_return_buffer(sc, bf);
1482 continue;
Sujithe8324352009-01-16 21:38:42 +05301483 }
1484
1485 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001486 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001487
Sujithe8324352009-01-16 21:38:42 +05301488 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001489 if (bf_is_ampdu_not_probing(bf))
1490 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301491
1492 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001493 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1494 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301495 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001496 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001497 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001498}
1499
1500/*
1501 * Drain a given TX queue (could be Beacon or Data)
1502 *
1503 * This assumes output has been stopped and
1504 * we do not need to block ath_tx_tasklet.
1505 */
1506void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1507{
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001508 ath_txq_lock(sc, txq);
1509
Felix Fietkaufce041b2011-05-19 12:20:25 +02001510 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1511 int idx = txq->txq_tailidx;
1512
1513 while (!list_empty(&txq->txq_fifo[idx])) {
1514 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1515 retry_tx);
1516
1517 INCR(idx, ATH_TXFIFO_DEPTH);
1518 }
1519 txq->txq_tailidx = idx;
1520 }
1521
1522 txq->axq_link = NULL;
1523 txq->axq_tx_inprogress = false;
1524 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001525
1526 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001527 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1528 ath_txq_drain_pending_buffers(sc, txq);
1529
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001530 ath_txq_unlock_complete(sc, txq);
Sujithe8324352009-01-16 21:38:42 +05301531}
1532
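/*
 * Abort tx DMA and drain every active hardware queue; returns false
 * if any queue could not be stopped.
 */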
Felix Fietkau080e1a22010-12-05 20:17:53 +01001533bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301534{
Sujithcbe61d82009-02-09 13:27:12 +05301535 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001536 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301537 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001538 int i;
1539 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301540
1541 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001542 return true;
Sujith043a0402009-01-16 21:38:47 +05301543
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001544 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301545
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001546 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301547 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001548 if (!ATH_TXQ_SETUP(sc, i))
1549 continue;
1550
Felix Fietkau34d25812011-10-07 02:28:12 +02001551 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1552 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301553 }
1554
Felix Fietkau080e1a22010-12-05 20:17:53 +01001555 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001556 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301557
1558 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001559 if (!ATH_TXQ_SETUP(sc, i))
1560 continue;
1561
1562 /*
1563 * The caller will resume queues with ieee80211_wake_queues.
1564 * Mark the queue as not stopped to prevent ath_tx_complete
1565 * from waking the queue too early.
1566 */
1567 txq = &sc->tx.txq[i];
1568 txq->stopped = false;
1569 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301570 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001571
1572 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301573}
1574
Sujithe8324352009-01-16 21:38:42 +05301575void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1576{
1577 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1578 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1579}
1580
Ben Greear7755bad2011-01-18 17:30:00 -08001581/* For each axq_acq entry, for each tid, try to schedule packets
1582 * for transmission until ampdu_depth has reached min Q depth.
1583 */
Sujithe8324352009-01-16 21:38:42 +05301584void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1585{
Ben Greear7755bad2011-01-18 17:30:00 -08001586 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1587 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301588
Felix Fietkau236de512011-09-03 01:40:25 +02001589 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001590 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301591 return;
1592
1593 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001594 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301595
Ben Greear7755bad2011-01-18 17:30:00 -08001596 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1597 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1598 list_del(&ac->list);
1599 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301600
Ben Greear7755bad2011-01-18 17:30:00 -08001601 while (!list_empty(&ac->tid_q)) {
1602 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1603 list);
1604 list_del(&tid->list);
1605 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301606
Ben Greear7755bad2011-01-18 17:30:00 -08001607 if (tid->paused)
1608 continue;
Sujithe8324352009-01-16 21:38:42 +05301609
Ben Greear7755bad2011-01-18 17:30:00 -08001610 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301611
Ben Greear7755bad2011-01-18 17:30:00 -08001612 /*
1613 * add tid to round-robin queue if more frames
1614 * are pending for the tid
1615 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001616 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001617 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301618
Ben Greear7755bad2011-01-18 17:30:00 -08001619 if (tid == last_tid ||
1620 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1621 break;
Sujithe8324352009-01-16 21:38:42 +05301622 }
Ben Greear7755bad2011-01-18 17:30:00 -08001623
Felix Fietkaub0477012011-12-14 22:08:05 +01001624 if (!list_empty(&ac->tid_q) && !ac->sched) {
1625 ac->sched = true;
1626 list_add_tail(&ac->list, &txq->axq_acq);
Ben Greear7755bad2011-01-18 17:30:00 -08001627 }
1628
1629 if (ac == last_ac ||
1630 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1631 return;
Sujithe8324352009-01-16 21:38:42 +05301632 }
1633}
1634
Sujithe8324352009-01-16 21:38:42 +05301635/***********/
1636/* TX, DMA */
1637/***********/
1638
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001639/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 * Insert a chain of ath_buf (descriptors) on a txq and
1641 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001642 */
Sujith102e0572008-10-29 10:15:16 +05301643static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001644 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001645{
Sujithcbe61d82009-02-09 13:27:12 +05301646 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001647 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001648 struct ath_buf *bf, *bf_last;
1649 bool puttxbuf = false;
1650 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301651
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001652 /*
1653 * Insert the frame on the outbound list and
1654 * pass it on to the hardware.
1655 */
1656
1657 if (list_empty(head))
1658 return;
1659
Felix Fietkaufce041b2011-05-19 12:20:25 +02001660 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001662 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001663
Joe Perchesd2182b62011-12-15 14:55:53 -08001664 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
1665 txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001666
Felix Fietkaufce041b2011-05-19 12:20:25 +02001667 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1668 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001669 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001670 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001671 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001672 list_splice_tail_init(head, &txq->axq_q);
1673
Felix Fietkaufce041b2011-05-19 12:20:25 +02001674 if (txq->axq_link) {
1675 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001676 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
Joe Perches226afe62010-12-02 19:12:37 -08001677 txq->axq_qnum, txq->axq_link,
1678 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001679 } else if (!edma)
1680 puttxbuf = true;
1681
1682 txq->axq_link = bf_last->bf_desc;
1683 }
1684
1685 if (puttxbuf) {
1686 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1687 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001688 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
Felix Fietkaufce041b2011-05-19 12:20:25 +02001689 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1690 }
1691
1692 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001693 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001694 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001695 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001696
1697 if (!internal) {
1698 txq->axq_depth++;
1699 if (bf_is_ampdu_not_probing(bf))
1700 txq->axq_ampdu_depth++;
1701 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001702}
1703
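/*
 * Queue an MPDU on an aggregation-enabled TID: either buffer it in
 * the software queue, or send it to the hardware immediately if the
 * block-ack window and queue depth allow it.
 */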
Sujithe8324352009-01-16 21:38:42 +05301704static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001705 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301706{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001707 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001708 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001709 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301710
1711 /*
1712 * Do not queue to h/w when any of the following conditions is true:
1713 * - there are pending frames in software queue
1714 * - the TID is currently paused for ADDBA/BAR request
1715 * - seqno is not within block-ack window
1716 * - h/w queue depth exceeds low water mark
1717 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001718 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001719 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001720 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001721 /*
Sujithe8324352009-01-16 21:38:42 +05301722 * Add this frame to the software queue to be scheduled later
1723 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001724 */
Ben Greearbda8add2011-01-09 23:11:48 -08001725 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001726 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001727 if (!txctl->an || !txctl->an->sleeping)
1728 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301729 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001730 }
1731
Felix Fietkau44f1d262011-08-28 00:32:25 +02001732 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1733 if (!bf)
1734 return;
1735
Felix Fietkau399c6482011-09-14 21:24:17 +02001736 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001737 INIT_LIST_HEAD(&bf_head);
1738 list_add(&bf->list, &bf_head);
1739
Sujithe8324352009-01-16 21:38:42 +05301740 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001741 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301742
1743 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001744 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301745 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001746 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001747 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301748}
1749
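/*
 * Send a frame without aggregation: set it up as a single-buffer
 * chain and hand it straight to the hardware queue.
 */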
Felix Fietkau82b873a2010-11-11 03:18:37 +01001750static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001751 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001752{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001753 struct ath_frame_info *fi = get_frame_info(skb);
1754 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301755 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001756
Felix Fietkau44f1d262011-08-28 00:32:25 +02001757 bf = fi->bf;
1758 if (!bf)
1759 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1760
1761 if (!bf)
1762 return;
1763
1764 INIT_LIST_HEAD(&bf_head);
1765 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001766 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301767
Sujithd43f30152009-01-16 21:38:53 +05301768 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001769 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001770 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301771 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001772}
1773
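/*
 * Cache per-frame data (key index, key type, frame length) in the
 * driver area of the tx_info, since the control pointers there are
 * reused later.
 */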
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001774static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1775 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301776{
1777 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001778 struct ieee80211_sta *sta = tx_info->control.sta;
1779 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001781 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001782 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001783 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301784
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001785 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301786
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001787 if (sta)
1788 an = (struct ath_node *) sta->drv_priv;
1789
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001790 memset(fi, 0, sizeof(*fi));
1791 if (hw_key)
1792 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001793 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1794 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001795 else
1796 fi->keyix = ATH9K_TXKEYIX_INVALID;
1797 fi->keytype = keytype;
1798 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301799}
1800
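/*
 * On APM-capable hardware, reduce a three-chain tx chainmask to two
 * chains for lower rates on 5 GHz channels.
 */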
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301801u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1802{
1803 struct ath_hw *ah = sc->sc_ah;
1804 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301805 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1806 (curchan->channelFlags & CHANNEL_5GHZ) &&
1807 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301808 return 0x3;
1809 else
1810 return chainmask;
1811}
1812
Felix Fietkau44f1d262011-08-28 00:32:25 +02001813/*
1814 * Assign a descriptor (and sequence number if necessary)
1815 * and map the buffer for DMA. Frees the skb on error.
1816 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001817static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001819 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001820 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301821{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001822 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001823 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001824 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001825 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001826 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001827
1828 bf = ath_tx_get_buffer(sc);
1829 if (!bf) {
Joe Perchesd2182b62011-12-15 14:55:53 -08001830 ath_dbg(common, XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001831 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001832 }
Sujithe8324352009-01-16 21:38:42 +05301833
Sujithe8324352009-01-16 21:38:42 +05301834 ATH_TXBUF_RESET(bf);
1835
Felix Fietkaufa05f872011-08-28 00:32:24 +02001836 if (tid) {
1837 seqno = tid->seq_next;
1838 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1839 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1840 bf->bf_state.seqno = seqno;
1841 }
1842
Sujithe8324352009-01-16 21:38:42 +05301843 bf->bf_mpdu = skb;
1844
Ben Greearc1739eb2010-10-14 12:45:29 -07001845 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1846 skb->len, DMA_TO_DEVICE);
1847 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301848 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001849 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001850 ath_err(ath9k_hw_common(sc->sc_ah),
1851 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001852 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001853 goto error;
Sujithe8324352009-01-16 21:38:42 +05301854 }
1855
Felix Fietkau56dc6332011-08-28 00:32:22 +02001856 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001857
1858 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001859
1860error:
1861 dev_kfree_skb_any(skb);
1862 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001863}
1864
1865/* FIXME: tx power */
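/*
 * Route a frame to either the A-MPDU path or the normal tx path,
 * depending on whether the destination TID has aggregation enabled.
 */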
Felix Fietkau44f1d262011-08-28 00:32:25 +02001866static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001867 struct ath_tx_control *txctl)
1868{
Felix Fietkau04caf862010-11-14 15:20:12 +01001869 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1870 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001871 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001872 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001873 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301874
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301875 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1876 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001877 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1878 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001879 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001880
Felix Fietkau066dae92010-11-07 14:59:39 +01001881 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001882 }
1883
1884 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001885 /*
1886 * Try aggregation if it's a unicast data frame
1887 * and the destination is HT capable.
1888 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001889 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301890 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001891 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1892 if (!bf)
Felix Fietkau3ad29522011-12-14 22:08:07 +01001893 return;
Felix Fietkau04caf862010-11-14 15:20:12 +01001894
Felix Fietkau82b873a2010-11-11 03:18:37 +01001895 bf->bf_state.bfs_paprd = txctl->paprd;
1896
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301897 if (txctl->paprd)
1898 bf->bf_state.bfs_paprd_timestamp = jiffies;
1899
Felix Fietkau44f1d262011-08-28 00:32:25 +02001900 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301901 }
Sujithe8324352009-01-16 21:38:42 +05301902}
1903
1904/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001905int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301906 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001907{
Felix Fietkau28d16702010-11-14 15:20:10 +01001908 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1909 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001910 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001911 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001912 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001913 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001914 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001915 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001916 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001917
Ben Greeara9927ba2010-12-06 21:13:49 -08001918 /* NOTE: sta can be NULL according to net/mac80211.h */
1919 if (sta)
1920 txctl->an = (struct ath_node *)sta->drv_priv;
1921
Felix Fietkau04caf862010-11-14 15:20:12 +01001922 if (info->control.hw_key)
1923 frmlen += info->control.hw_key->icv_len;
1924
Felix Fietkau28d16702010-11-14 15:20:10 +01001925 /*
1926 * As a temporary workaround, assign seq# here; this will likely need
1927 * to be cleaned up to work better with Beacon transmission and virtual
1928 * BSSes.
1929 */
1930 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1931 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1932 sc->tx.seq_no += 0x10;
1933 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1934 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1935 }
1936
John W. Linville42cecc32011-09-19 15:42:31 -04001937 /* Add the padding after the header if this is not already done */
1938 padpos = ath9k_cmn_padpos(hdr->frame_control);
1939 padsize = padpos & 3;
1940 if (padsize && skb->len > padpos) {
1941 if (skb_headroom(skb) < padsize)
1942 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001943
John W. Linville42cecc32011-09-19 15:42:31 -04001944 skb_push(skb, padsize);
1945 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc42011-09-15 10:03:12 +02001946 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001947 }
1948
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001949 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1950 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1951 !ieee80211_is_data(hdr->frame_control))
1952 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1953
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001954 setup_frame_info(hw, skb, frmlen);
1955
1956 /*
1957 * At this point, the vif, hw_key and sta pointers in the tx control
1958 * info are no longer valid (overwritten by the ath_frame_info data).
1959 */
1960
Felix Fietkau066dae92010-11-07 14:59:39 +01001961 q = skb_get_queue_mapping(skb);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001962
1963 ath_txq_lock(sc, txq);
Felix Fietkau066dae92010-11-07 14:59:39 +01001964 if (txq == sc->tx.txq_map[q] &&
1965 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966 ieee80211_stop_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00001967 txq->stopped = true;
Felix Fietkau97923b12010-06-12 00:33:55 -04001968 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001969
Felix Fietkau44f1d262011-08-28 00:32:25 +02001970 ath_tx_start_dma(sc, skb, txctl);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001971
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001972 ath_txq_unlock(sc, txq);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001973
Felix Fietkau44f1d262011-08-28 00:32:25 +02001974 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001975}
1976
Sujithe8324352009-01-16 21:38:42 +05301977/*****************/
1978/* TX Completion */
1979/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001980
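/*
 * Hand a completed skb back to mac80211: strip the MAC header
 * padding, update powersave and queue-stop state, and queue the skb
 * for status reporting.
 */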
Sujithe8324352009-01-16 21:38:42 +05301981static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301982 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001983{
Sujithe8324352009-01-16 21:38:42 +05301984 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001985 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001986 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001987 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301988
Joe Perchesd2182b62011-12-15 14:55:53 -08001989 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301990
Felix Fietkau55797b12011-09-14 21:24:16 +02001991 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301992 /* Frame was ACKed */
1993 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301994
John W. Linville42cecc32011-09-19 15:42:31 -04001995 padpos = ath9k_cmn_padpos(hdr->frame_control);
1996 padsize = padpos & 3;
1997 if (padsize && skb->len>padpos+padsize) {
1998 /*
1999 * Remove MAC header padding before giving the frame back to
2000 * mac80211.
2001 */
2002 memmove(skb->data + padsize, skb->data, padpos);
2003 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05302004 }
2005
Felix Fietkauc8e88682011-11-16 13:08:40 +01002006 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05302007 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perchesd2182b62011-12-15 14:55:53 -08002008 ath_dbg(common, PS,
Joe Perches226afe62010-12-02 19:12:37 -08002009 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05302010 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2011 PS_WAIT_FOR_CAB |
2012 PS_WAIT_FOR_PSPOLL_DATA |
2013 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03002014 }
2015
Felix Fietkau7545daf2011-01-24 19:23:16 +01002016 q = skb_get_queue_mapping(skb);
2017 if (txq == sc->tx.txq_map[q]) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01002018 if (WARN_ON(--txq->pending_frames < 0))
2019 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01002020
Felix Fietkau7545daf2011-01-24 19:23:16 +01002021 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2022 ieee80211_wake_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00002023 txq->stopped = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002024 }
Felix Fietkau97923b12010-06-12 00:33:55 -04002025 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002026
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002027 __skb_queue_tail(&txq->complete_q, skb);
Sujithe8324352009-01-16 21:38:42 +05302028}
2029
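/*
 * Unmap and complete a single ath_buf and return its descriptor list
 * to the free buffer pool.
 */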
2030static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002031 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01002032 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05302033{
2034 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002035 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302036 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302037 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302038
Felix Fietkau55797b12011-09-14 21:24:16 +02002039 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302040 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302041
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002042 if (ts->ts_status & ATH9K_TXERR_FILT)
2043 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2044
Ben Greearc1739eb2010-10-14 12:45:29 -07002045 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002046 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002047
2048 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302049 if (time_after(jiffies,
2050 bf->bf_state.bfs_paprd_timestamp +
2051 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002052 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002053 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002054 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002055 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002056 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302057 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002058 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002059 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2060 * accidentally reference it later.
2061 */
2062 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302063
2064 /*
2065 * Return this mpdu's list of ath_buf to the free queue
2066 */
2067 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2068 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2069 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2070}
2071
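/*
 * Translate the hardware tx status into mac80211 rate-control
 * feedback: A-MPDU subframe counts, per-rate retry counts and ack
 * RSSI.
 */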
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002072static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2073 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002074 int txok)
Sujithc4288392008-11-18 09:09:30 +05302075{
Sujitha22be222009-03-30 15:28:36 +05302076 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302077 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302078 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002079 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002080 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302081 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302082
Sujith95e4acb2009-03-13 08:56:09 +05302083 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002084 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302085
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002086 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302087 WARN_ON(tx_rateindex >= hw->max_rates);
2088
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002089 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002090 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302091
Felix Fietkaub572d032010-11-14 15:20:07 +01002092 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002093 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302094 tx_info->status.ampdu_len = nframes;
2095 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002096
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002097 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002098 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002099 /*
2100 * If an underrun error is seen, treat it as an excessive
2101 * retry only if the max frame trigger level has been reached
2102 * (2 KB for single stream, and 4 KB for dual stream).
2103 * Adjust the long retry as if the frame was tried
2104 * hw->max_rate_tries times, to affect how rate control updates
2105 * PER for the failed rate.
2106 * In case of congestion on the bus, penalizing this type of
2107 * underrun should help the hardware actually transmit new
2108 * frames successfully by eventually preferring slower rates.
2109 * This should also alleviate congestion on the bus.
2110 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002111 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2112 ATH9K_TX_DELIM_UNDERRUN)) &&
2113 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002114 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002115 tx_info->status.rates[tx_rateindex].count =
2116 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302117 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302118
Felix Fietkau545750d2009-11-23 22:21:01 +01002119 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302120 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002121 tx_info->status.rates[i].idx = -1;
2122 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302123
Felix Fietkau78c46532010-06-25 01:26:16 +02002124 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302125}
2126
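/*
 * Common completion path for legacy and EDMA tx: update queue depth
 * counters and complete the buffer as either an A-MPDU or a single
 * frame.
 */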
Felix Fietkaufce041b2011-05-19 12:20:25 +02002127static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2128 struct ath_tx_status *ts, struct ath_buf *bf,
2129 struct list_head *bf_head)
2130{
2131 int txok;
2132
2133 txq->axq_depth--;
2134 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2135 txq->axq_tx_inprogress = false;
2136 if (bf_is_ampdu_not_probing(bf))
2137 txq->axq_ampdu_depth--;
2138
Felix Fietkaufce041b2011-05-19 12:20:25 +02002139 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002140 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002141 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002142 } else
2143 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2144
Felix Fietkaufce041b2011-05-19 12:20:25 +02002145 if (sc->sc_flags & SC_OP_TXAGGR)
2146 ath_txq_schedule(sc, txq);
2147}
2148
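/*
 * Reap completed descriptors from a legacy (non-EDMA) tx queue.
 */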
Sujithc4288392008-11-18 09:09:30 +05302149static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150{
Sujithcbe61d82009-02-09 13:27:12 +05302151 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002152 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2154 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302155 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002156 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002157 int status;
2158
Joe Perchesd2182b62011-12-15 14:55:53 -08002159 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
Joe Perches226afe62010-12-02 19:12:37 -08002160 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2161 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002163 ath_txq_lock(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002164 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002165 if (work_pending(&sc->hw_reset_work))
2166 break;
2167
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168 if (list_empty(&txq->axq_q)) {
2169 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002170 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002171 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002172 break;
2173 }
2174 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2175
2176 /*
2177 * There is a race condition in which a BH gets scheduled
2178 * after sw writes TxE and before hw re-loads the last
2179 * descriptor to get the newly chained one.
2180 * Software must keep the last DONE descriptor as a
2181 * holding descriptor - software does so by marking
2182 * it with the STALE flag.
2183 */
2184 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302185 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002187 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002188 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002189
2190 bf = list_entry(bf_held->list.next, struct ath_buf,
2191 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 }
2193
2194 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302195 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002196
Felix Fietkau29bffa92010-03-29 20:14:23 -07002197 memset(&ts, 0, sizeof(ts));
2198 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002199 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002201
Ben Greear2dac4fb2011-01-09 23:11:45 -08002202 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002203
2204 /*
2205 * Remove ath_bufs of the same transmit unit from txq;
2206 * however, leave the last descriptor behind as the holding
2207 * descriptor for hw.
2208 */
Sujitha119cc42009-03-30 15:28:38 +05302209 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002210 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002211 if (!list_is_singular(&lastbf->list))
2212 list_cut_position(&bf_head,
2213 &txq->axq_q, lastbf->list.prev);
2214
Felix Fietkaufce041b2011-05-19 12:20:25 +02002215 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002216 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002217 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002218 }
Johannes Berge6a98542008-10-21 12:40:02 +02002219
Felix Fietkaufce041b2011-05-19 12:20:25 +02002220 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002221 }
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002222 ath_txq_unlock_complete(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002223}
2224
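/*
 * Periodic tx watchdog: if a non-empty queue shows no completions
 * between two consecutive polls, assume the hardware is hung and
 * schedule a chip reset.
 */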
Sujith305fe472009-07-23 15:32:29 +05302225static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002226{
2227 struct ath_softc *sc = container_of(work, struct ath_softc,
2228 tx_complete_work.work);
2229 struct ath_txq *txq;
2230 int i;
2231 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002232#ifdef CONFIG_ATH9K_DEBUGFS
2233 sc->tx_complete_poll_work_seen++;
2234#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002235
2236 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2237 if (ATH_TXQ_SETUP(sc, i)) {
2238 txq = &sc->tx.txq[i];
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002239 ath_txq_lock(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002240 if (txq->axq_depth) {
2241 if (txq->axq_tx_inprogress) {
2242 needreset = true;
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002243 ath_txq_unlock(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002244 break;
2245 } else {
2246 txq->axq_tx_inprogress = true;
2247 }
2248 }
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002249 ath_txq_unlock_complete(sc, txq);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002250 }
2251
2252 if (needreset) {
Joe Perchesd2182b62011-12-15 14:55:53 -08002253 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
Joe Perches226afe62010-12-02 19:12:37 -08002254 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002255 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002256 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002257 }
2258
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002259 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002260 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2261}
2262
2263
Sujithe8324352009-01-16 21:38:42 +05302264
2265void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002266{
Sujithe8324352009-01-16 21:38:42 +05302267 int i;
2268 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002269
Sujithe8324352009-01-16 21:38:42 +05302270 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271
2272 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302273 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2274 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002275 }
2276}
2277
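/*
 * Reap completed frames from the global EDMA tx status ring and
 * complete them on their originating queues.
 */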
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002278void ath_tx_edma_tasklet(struct ath_softc *sc)
2279{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002281 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2282 struct ath_hw *ah = sc->sc_ah;
2283 struct ath_txq *txq;
2284 struct ath_buf *bf, *lastbf;
2285 struct list_head bf_head;
2286 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287
2288 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002289 if (work_pending(&sc->hw_reset_work))
2290 break;
2291
Felix Fietkaufce041b2011-05-19 12:20:25 +02002292 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002293 if (status == -EINPROGRESS)
2294 break;
2295 if (status == -EIO) {
Joe Perchesd2182b62011-12-15 14:55:53 -08002296 ath_dbg(common, XMIT, "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002297 break;
2298 }
2299
2300 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002301 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002302 continue;
2303
Felix Fietkaufce041b2011-05-19 12:20:25 +02002304 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002305
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002306 ath_txq_lock(sc, txq);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002307
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002308 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002309 ath_txq_unlock(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002310 return;
2311 }
2312
2313 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2314 struct ath_buf, list);
2315 lastbf = bf->bf_lastbf;
2316
2317 INIT_LIST_HEAD(&bf_head);
2318 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2319 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002320
Felix Fietkaufce041b2011-05-19 12:20:25 +02002321 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2322 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002323
Felix Fietkaufce041b2011-05-19 12:20:25 +02002324 if (!list_empty(&txq->axq_q)) {
2325 struct list_head bf_q;
2326
2327 INIT_LIST_HEAD(&bf_q);
2328 txq->axq_link = NULL;
2329 list_splice_tail_init(&txq->axq_q, &bf_q);
2330 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2331 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002332 }
2333
Felix Fietkaufce041b2011-05-19 12:20:25 +02002334 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002335 ath_txq_unlock_complete(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002336 }
2337}
2338
Sujithe8324352009-01-16 21:38:42 +05302339/*****************/
2340/* Init, Cleanup */
2341/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002343static int ath_txstatus_setup(struct ath_softc *sc, int size)
2344{
2345 struct ath_descdma *dd = &sc->txsdma;
2346 u8 txs_len = sc->sc_ah->caps.txs_len;
2347
2348 dd->dd_desc_len = size * txs_len;
2349 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2350 &dd->dd_desc_paddr, GFP_KERNEL);
2351 if (!dd->dd_desc)
2352 return -ENOMEM;
2353
2354 return 0;
2355}
2356
2357static int ath_tx_edma_init(struct ath_softc *sc)
2358{
2359 int err;
2360
2361 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2362 if (!err)
2363 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2364 sc->txsdma.dd_desc_paddr,
2365 ATH_TXSTATUS_RING_SIZE);
2366
2367 return err;
2368}
2369
2370static void ath_tx_edma_cleanup(struct ath_softc *sc)
2371{
2372 struct ath_descdma *dd = &sc->txsdma;
2373
2374 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2375 dd->dd_desc_paddr);
2376}
2377
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002378int ath_tx_init(struct ath_softc *sc, int nbufs)
2379{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002380 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381 int error = 0;
2382
Sujith797fe5c2009-03-30 15:28:45 +05302383 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Sujith797fe5c2009-03-30 15:28:45 +05302385 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002386 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302387 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002388 ath_err(common,
2389 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302390 goto err;
2391 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Sujith797fe5c2009-03-30 15:28:45 +05302393 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002394 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302395 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002396 ath_err(common,
2397 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302398 goto err;
2399 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002400
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002401 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2402
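	/* EDMA chips additionally need the TX status ring set up here. */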
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002403 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2404 error = ath_tx_edma_init(sc);
2405 if (error)
2406 goto err;
2407 }
2408
Sujith797fe5c2009-03-30 15:28:45 +05302409err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410 if (error != 0)
2411 ath_tx_cleanup(sc);
2412
2413 return error;
2414}
2415
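/*
 * Undo ath_tx_init(): release the beacon and TX descriptor pools and, on
 * EDMA hardware, the TX status ring.  Safe to call on a partially
 * initialised state, since each step is guarded by a non-zero length or
 * capability check; ath_tx_init() relies on this in its error path.
 */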
Sujith797fe5c2009-03-30 15:28:45 +05302416void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417{
Sujithb77f4832008-12-07 21:44:03 +05302418 if (sc->beacon.bdma.dd_desc_len != 0)
2419 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420
Sujithb77f4832008-12-07 21:44:03 +05302421 if (sc->tx.txdma.dd_desc_len != 0)
2422 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002423
2424 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2425 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426}
2427
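/*
 * Per-station TX state setup: initialise every TID's aggregation state
 * (block-ack window tracking, software queue, scheduling flags) and map
 * each WME access category to its hardware TX queue.
 */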
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2429{
Sujithc5170162008-10-29 10:13:59 +05302430 struct ath_atx_tid *tid;
2431 struct ath_atx_ac *ac;
2432 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433
Sujith8ee5afb2008-12-07 21:43:36 +05302434 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302435 tidno < WME_NUM_TID;
2436 tidno++, tid++) {
2437 tid->an = an;
2438 tid->tidno = tidno;
2439 tid->seq_start = tid->seq_next = 0;
2440 tid->baw_size = WME_MAX_BA;
2441 tid->baw_head = tid->baw_tail = 0;
2442 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302443 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302444 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002445 __skb_queue_head_init(&tid->buf_q);
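		/* Each TID is serviced by one of the four WME access categories. */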
Sujithc5170162008-10-29 10:13:59 +05302446 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302447 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302448 tid->state &= ~AGGR_ADDBA_COMPLETE;
2449 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302450 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002451
Sujith8ee5afb2008-12-07 21:43:36 +05302452 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302453 acno < WME_NUM_AC; acno++, ac++) {
2454 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002455 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302456 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002457 }
2458}
2459
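/*
 * Per-station TX teardown (presumably reached from the mac80211
 * sta_remove path): for every TID, take the queue lock, unlink the TID
 * and its access category from the scheduler lists, drop any frames
 * still pending on the software queue via ath_tid_drain(), and clear
 * the aggregation state.
 */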
Sujithb5aa9bf2008-10-29 10:13:31 +05302460void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002461{
Felix Fietkau2b409942010-07-07 19:42:08 +02002462 struct ath_atx_ac *ac;
2463 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002464 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002465 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302466
Felix Fietkau2b409942010-07-07 19:42:08 +02002467 for (tidno = 0, tid = &an->tid[tidno];
2468 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002469
Felix Fietkau2b409942010-07-07 19:42:08 +02002470 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002471 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002472
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002473 ath_txq_lock(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002474
Felix Fietkau2b409942010-07-07 19:42:08 +02002475 if (tid->sched) {
2476 list_del(&tid->list);
2477 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002478 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002479
2480 if (ac->sched) {
2481 list_del(&ac->list);
2482 tid->ac->sched = false;
2483 }
2484
2485 ath_tid_drain(sc, txq, tid);
2486 tid->state &= ~AGGR_ADDBA_COMPLETE;
2487 tid->state &= ~AGGR_CLEANUP;
2488
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002489 ath_txq_unlock(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002490 }
2491}