/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 },	/* 0: BPSK */
	{  52, 108 },	/* 1: QPSK 1/2 */
	{  78, 162 },	/* 2: QPSK 3/4 */
	{ 104, 216 },	/* 3: 16-QAM 1/2 */
	{ 156, 324 },	/* 4: 16-QAM 3/4 */
	{ 208, 432 },	/* 5: 64-QAM 2/3 */
	{ 234, 486 },	/* 6: 64-QAM 3/4 */
	{ 260, 540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

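/*
 * Queue a TID for transmission: add it to its access category's TID list
 * and, if needed, add the access category to the hardware queue's schedule
 * list. Paused or already scheduled TIDs are left untouched.
 */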
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

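/*
 * Per-frame driver state lives in the rate_driver_data area of the mac80211
 * tx info; the BUILD_BUG_ON below guards against overflowing that area.
 */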
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

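/*
 * Ask mac80211 to send a BlockAckReq that moves the receiver's block-ack
 * window to the given sequence number.
 */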
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

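/*
 * Drop all frames still pending on a software TID queue. Frames that were
 * already retried (and thus sit inside the block-ack window) are completed
 * with a failed status and removed from the window; untouched frames are
 * sent out as normal, non-aggregated frames. A BAR is sent afterwards if
 * any window slot was released.
 */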
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar)
		ath_send_bar(tid, tid->seq_start);
}

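/*
 * Mark a sequence number as completed in the software block-ack window and
 * slide the window start past any leading completed slots.
 */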
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

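/* Record a newly transmitted sequence number in the block-ack window bitmap. */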
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

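/*
 * Bump the software retry count for a frame and, on the first retry, set
 * the IEEE 802.11 retry bit in the header and sync it back to the DMA
 * buffer.
 */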
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

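/*
 * Clone a tx buffer, reusing the same MPDU and DMA mapping; used when the
 * original buffer has to stay behind as a stale holding descriptor.
 */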
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

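/*
 * Walk an aggregate and count the total number of subframes as well as the
 * subframes that were not acknowledged in the hardware block-ack bitmap.
 */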
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


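/*
 * Completion handler for an A-MPDU: sort the subframes into acked, failed
 * and pending ones based on the block-ack status, update the BAW, hand
 * completed frames back to mac80211 and requeue the rest for retry.
 */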
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
					  ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it is
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

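/*
 * Return true if any valid rate series configured for this frame is a
 * legacy (non-MCS) rate.
 */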
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

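/*
 * Compute the aggregate length limit for this TID from the smallest 4 ms
 * frame limit across the configured rate series, any BT coexistence
 * constraints and the peer's advertised maximum A-MPDU size.
 */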
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates of up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired MPDU density from microseconds to bytes, based
	 * on the highest rate in the rate series (i.e. the first rate), to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no MPDU density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

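/*
 * Build one aggregate from the TID's software queue: pull frames while they
 * fit into the block-ack window, the aggregate length limit and the subframe
 * limit, add them to the BAW and chain their buffers together.
 */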
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
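/*
 * Rough worked example (illustration only, not from the original sources):
 * for MCS 7 at 20 MHz, full GI and one stream, a 1500 byte packet gives
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, so nsymbols = 47 and the
 * duration is 47 * 4 us plus 36 us of training/signal fields, i.e. 224 us.
 */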
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration for legacy/HT training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

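/*
 * Fill the rate series of the tx descriptor info from the mac80211 rate
 * control decisions: per-series tries, RTS/CTS protection flags, channel
 * width, guard interval, chainmask and the computed packet duration.
 */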
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

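/*
 * Program the tx descriptors for a frame or an aggregate chain: the common
 * flags and rate info are set up once, then each buffer in the chain gets
 * its DMA address, length, key info and aggregate position filled in.
 */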
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

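/*
 * Form aggregates from the TID's queue and hand them to the hardware queue
 * until the queue is sufficiently filled or the block-ack window closes.
 */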
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	spin_unlock_bh(&txq->axq_lock);
}

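/*
 * The station entered power save: unschedule all of its TIDs and tell
 * mac80211 which TIDs still have buffered frames.
 */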
Johannes Berg042ec452011-09-29 16:04:26 +02001193void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1194 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001195{
1196 struct ath_atx_tid *tid;
1197 struct ath_atx_ac *ac;
1198 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001199 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001200 int tidno;
1201
1202 for (tidno = 0, tid = &an->tid[tidno];
1203 tidno < WME_NUM_TID; tidno++, tid++) {
1204
1205 if (!tid->sched)
1206 continue;
1207
1208 ac = tid->ac;
1209 txq = ac->txq;
1210
1211 spin_lock_bh(&txq->axq_lock);
1212
Johannes Berg042ec452011-09-29 16:04:26 +02001213 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001214
1215 tid->sched = false;
1216 list_del(&tid->list);
1217
1218 if (ac->sched) {
1219 ac->sched = false;
1220 list_del(&ac->list);
1221 }
1222
1223 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001224
Johannes Berg042ec452011-09-29 16:04:26 +02001225 ieee80211_sta_set_buffered(sta, tidno, buffered);
1226 }
Felix Fietkau55195412011-04-17 23:28:09 +02001227}
1228
1229void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1230{
1231 struct ath_atx_tid *tid;
1232 struct ath_atx_ac *ac;
1233 struct ath_txq *txq;
1234 int tidno;
1235
1236 for (tidno = 0, tid = &an->tid[tidno];
1237 tidno < WME_NUM_TID; tidno++, tid++) {
1238
1239 ac = tid->ac;
1240 txq = ac->txq;
1241
1242 spin_lock_bh(&txq->axq_lock);
1243 ac->clear_ps_filter = true;
1244
Felix Fietkau56dc6332011-08-28 00:32:22 +02001245 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001246 ath_tx_queue_tid(txq, tid);
1247 ath_txq_schedule(sc, txq);
1248 }
1249
1250 spin_unlock_bh(&txq->axq_lock);
1251 }
1252}
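
/*
 * Illustrative sketch only, not part of this file: how a mac80211
 * sta_notify handler would be expected to drive the two helpers above
 * when a station enters or leaves power-save.  ath9k's real handler
 * lives elsewhere in the driver; the example_* name is hypothetical.
 */
#if 0
static void example_sta_notify(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       enum sta_notify_cmd cmd,
			       struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		/* Unschedule the TIDs and report buffered frames */
		ath_tx_aggr_sleep(sta, sc, an);
		break;
	case STA_NOTIFY_AWAKE:
		/* Re-schedule TIDs that still have queued frames */
		ath_tx_aggr_wakeup(sc, an);
		break;
	}
}
#endif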
1253
Sujithe8324352009-01-16 21:38:42 +05301254void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1255{
1256 struct ath_atx_tid *txtid;
1257 struct ath_node *an;
1258
1259 an = (struct ath_node *)sta->drv_priv;
1260
1261 if (sc->sc_flags & SC_OP_TXAGGR) {
1262 txtid = ATH_AN_2_TID(an, tid);
1263 txtid->baw_size =
1264 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1265 txtid->state |= AGGR_ADDBA_COMPLETE;
1266 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1267 ath_tx_resume_tid(sc, txtid);
1268 }
1269}
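
/*
 * Illustrative sketch only, not part of this file: the expected call
 * sequence from a mac80211 ampdu_action handler into the aggregation
 * helpers above (ath_tx_aggr_start() is declared in ath9k.h).  RX
 * aggregation, locking and power-save handling are omitted; the
 * example_* name is hypothetical.
 */
#if 0
static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				enum ieee80211_ampdu_mlme_action action,
				struct ieee80211_sta *sta, u16 tid,
				u16 *ssn, u8 buf_size)
{
	struct ath_softc *sc = hw->priv;
	int ret = 0;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
		if (!ret)
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		ath_tx_aggr_stop(sc, sta, tid);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ath_tx_aggr_resume(sc, sta, tid);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
#endif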
1270
Sujithe8324352009-01-16 21:38:42 +05301271/********************/
1272/* Queue Management */
1273/********************/
1274
Sujithe8324352009-01-16 21:38:42 +05301275static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1276 struct ath_txq *txq)
1277{
1278 struct ath_atx_ac *ac, *ac_tmp;
1279 struct ath_atx_tid *tid, *tid_tmp;
1280
1281 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1282 list_del(&ac->list);
1283 ac->sched = false;
1284 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1285 list_del(&tid->list);
1286 tid->sched = false;
1287 ath_tid_drain(sc, txq, tid);
1288 }
1289 }
1290}
1291
1292struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1293{
Sujithcbe61d82009-02-09 13:27:12 +05301294 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301295 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001296 static const int subtype_txq_to_hwq[] = {
1297 [WME_AC_BE] = ATH_TXQ_AC_BE,
1298 [WME_AC_BK] = ATH_TXQ_AC_BK,
1299 [WME_AC_VI] = ATH_TXQ_AC_VI,
1300 [WME_AC_VO] = ATH_TXQ_AC_VO,
1301 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001302 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301303
1304 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001305 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301306 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1307 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1308 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1309 qi.tqi_physCompBuf = 0;
1310
1311 /*
1312 * Enable interrupts only for EOL and DESC conditions.
1313 * We mark tx descriptors to receive a DESC interrupt
1314 * when a tx queue gets deep; otherwise we wait for the
1315 * EOL to reap descriptors. Note that this is done to
1316 * reduce interrupt load; it only defers reaping
1317 * descriptors and never delays transmitting frames. Aside from
1318 * reducing interrupts this also permits more concurrency.
1319 * The only potential downside is if the tx queue backs
1320 * up, in which case the top half of the kernel may back up
1321 * due to a lack of tx descriptors.
1322 *
1323 * The UAPSD queue is an exception, since we take a desc-
1324 * based intr on the EOSP frames.
1325 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001326 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1327 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1328 TXQ_FLAG_TXERRINT_ENABLE;
1329 } else {
1330 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1331 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1332 else
1333 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1334 TXQ_FLAG_TXDESCINT_ENABLE;
1335 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001336 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1337 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301338 /*
1339 * NB: don't print a message, this happens
1340 * normally on parts with too few tx queues
1341 */
1342 return NULL;
1343 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001344 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1345 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301346
Ben Greear60f2d1d2011-01-09 23:11:52 -08001347 txq->axq_qnum = axq_qnum;
1348 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301349 txq->axq_link = NULL;
1350 INIT_LIST_HEAD(&txq->axq_q);
1351 INIT_LIST_HEAD(&txq->axq_acq);
1352 spin_lock_init(&txq->axq_lock);
1353 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001354 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001355 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001356 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001357
1358 txq->txq_headidx = txq->txq_tailidx = 0;
1359 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1360 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301361 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001362 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301363}
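
/*
 * Illustrative sketch only, not part of this file: creating the four
 * WME data queues with ath_txq_setup() at init time and recording them
 * in the per-AC map used by the rest of the TX path.  The example_*
 * name is hypothetical; the real setup is done in the driver's init
 * code.
 */
#if 0
static int example_setup_data_queues(struct ath_softc *sc)
{
	int i;

	for (i = 0; i < WME_NUM_AC; i++) {
		struct ath_txq *txq;

		/* NULL normally just means the part has too few hw queues */
		txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		if (!txq)
			return -EIO;

		sc->tx.txq_map[i] = txq;
	}

	return 0;
}
#endif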
1364
Sujithe8324352009-01-16 21:38:42 +05301365int ath_txq_update(struct ath_softc *sc, int qnum,
1366 struct ath9k_tx_queue_info *qinfo)
1367{
Sujithcbe61d82009-02-09 13:27:12 +05301368 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301369 int error = 0;
1370 struct ath9k_tx_queue_info qi;
1371
1372 if (qnum == sc->beacon.beaconq) {
1373 /*
1374 * XXX: for beacon queue, we just save the parameter.
1375 * It will be picked up by ath_beaconq_config when
1376 * it's necessary.
1377 */
1378 sc->beacon.beacon_qi = *qinfo;
1379 return 0;
1380 }
1381
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001382 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301383
1384 ath9k_hw_get_txq_props(ah, qnum, &qi);
1385 qi.tqi_aifs = qinfo->tqi_aifs;
1386 qi.tqi_cwmin = qinfo->tqi_cwmin;
1387 qi.tqi_cwmax = qinfo->tqi_cwmax;
1388 qi.tqi_burstTime = qinfo->tqi_burstTime;
1389 qi.tqi_readyTime = qinfo->tqi_readyTime;
1390
1391 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001392 ath_err(ath9k_hw_common(sc->sc_ah),
1393 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301394 error = -EIO;
1395 } else {
1396 ath9k_hw_resettxqueue(ah, qnum);
1397 }
1398
1399 return error;
1400}
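
/*
 * Illustrative sketch only, not part of this file: pushing mac80211's
 * per-AC WMM parameters into a hardware queue via ath_txq_update().
 * The real conf_tx handler lives elsewhere in the driver and may apply
 * additional conversions (e.g. for the TXOP/burst time units); the
 * example_* name is hypothetical.
 */
#if 0
static int example_apply_wmm_params(struct ath_softc *sc, struct ath_txq *txq,
				    const struct ieee80211_tx_queue_params *params)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = params->aifs;
	qi.tqi_cwmin = params->cw_min;
	qi.tqi_cwmax = params->cw_max;
	qi.tqi_burstTime = params->txop;	/* unit conversion left out */

	return ath_txq_update(sc, txq->axq_qnum, &qi);
}
#endif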
1401
1402int ath_cabq_update(struct ath_softc *sc)
1403{
1404 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001405 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301406 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301407
1408 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1409 /*
1410 * Ensure the readytime % is within the bounds.
1411 */
Sujith17d79042009-02-09 13:27:03 +05301412 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1413 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1414 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1415 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301416
Steve Brown9814f6b2011-02-07 17:10:39 -07001417 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301418 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301419 ath_txq_update(sc, qnum, &qi);
1420
1421 return 0;
1422}
1423
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001424static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1425{
1426 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1427 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1428}
1429
Felix Fietkaufce041b2011-05-19 12:20:25 +02001430static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1431 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301432{
1433 struct ath_buf *bf, *lastbf;
1434 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001435 struct ath_tx_status ts;
1436
1437 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001438 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301439 INIT_LIST_HEAD(&bf_head);
1440
Felix Fietkaufce041b2011-05-19 12:20:25 +02001441 while (!list_empty(list)) {
1442 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301443
Felix Fietkaufce041b2011-05-19 12:20:25 +02001444 if (bf->bf_stale) {
1445 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301446
Felix Fietkaufce041b2011-05-19 12:20:25 +02001447 ath_tx_return_buffer(sc, bf);
1448 continue;
Sujithe8324352009-01-16 21:38:42 +05301449 }
1450
1451 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001452 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001453
Sujithe8324352009-01-16 21:38:42 +05301454 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001455 if (bf_is_ampdu_not_probing(bf))
1456 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301457
1458 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001459 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1460 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301461 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001462 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001463 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001464}
1465
1466/*
1467 * Drain a given TX queue (could be Beacon or Data)
1468 *
1469 * This assumes output has been stopped and
1470 * we do not need to block ath_tx_tasklet.
1471 */
1472void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1473{
1474 spin_lock_bh(&txq->axq_lock);
1475 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1476 int idx = txq->txq_tailidx;
1477
1478 while (!list_empty(&txq->txq_fifo[idx])) {
1479 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1480 retry_tx);
1481
1482 INCR(idx, ATH_TXFIFO_DEPTH);
1483 }
1484 txq->txq_tailidx = idx;
1485 }
1486
1487 txq->axq_link = NULL;
1488 txq->axq_tx_inprogress = false;
1489 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001490
1491 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001492 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1493 ath_txq_drain_pending_buffers(sc, txq);
1494
1495 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301496}
1497
Felix Fietkau080e1a22010-12-05 20:17:53 +01001498bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301499{
Sujithcbe61d82009-02-09 13:27:12 +05301500 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001501 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301502 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001503 int i;
1504 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301505
1506 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001507 return true;
Sujith043a0402009-01-16 21:38:47 +05301508
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001509 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301510
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001511 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301512 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001513 if (!ATH_TXQ_SETUP(sc, i))
1514 continue;
1515
Felix Fietkau34d25812011-10-07 02:28:12 +02001516 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1517 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301518 }
1519
Felix Fietkau080e1a22010-12-05 20:17:53 +01001520 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001521 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301522
1523 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001524 if (!ATH_TXQ_SETUP(sc, i))
1525 continue;
1526
1527 /*
1528 * The caller will resume queues with ieee80211_wake_queues.
1529 * Mark the queue as not stopped to prevent ath_tx_complete
1530 * from waking the queue too early.
1531 */
1532 txq = &sc->tx.txq[i];
1533 txq->stopped = false;
1534 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301535 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001536
1537 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301538}
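
/*
 * Illustrative sketch only, not part of this file: a typical reset path
 * stops the mac80211 queues, drains all hardware queues and only then
 * touches the chip.  The real reset code lives elsewhere in the driver;
 * the example_* name and the reset placeholder are hypothetical.
 */
#if 0
static void example_flush_before_reset(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	ieee80211_stop_queues(sc->hw);

	/* retry_tx == false: frames pending in the software queues are
	 * flushed instead of being kept for a software retry */
	if (!ath_drain_all_txq(sc, false))
		ath_err(common, "TX DMA did not stop cleanly\n");

	/* ... chip reset would go here ... */

	ieee80211_wake_queues(sc->hw);
}
#endif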
1539
Sujithe8324352009-01-16 21:38:42 +05301540void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1541{
1542 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1543 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1544}
1545
Ben Greear7755bad2011-01-18 17:30:00 -08001546/* For each axq_acq entry, for each tid, try to schedule packets
1547 * for transmit until ampdu_depth has reached min Q depth.
1548 */
Sujithe8324352009-01-16 21:38:42 +05301549void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1550{
Ben Greear7755bad2011-01-18 17:30:00 -08001551 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1552 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301553
Felix Fietkau236de512011-09-03 01:40:25 +02001554 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001555 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301556 return;
1557
1558 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001559 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301560
Ben Greear7755bad2011-01-18 17:30:00 -08001561 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1562 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1563 list_del(&ac->list);
1564 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301565
Ben Greear7755bad2011-01-18 17:30:00 -08001566 while (!list_empty(&ac->tid_q)) {
1567 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1568 list);
1569 list_del(&tid->list);
1570 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301571
Ben Greear7755bad2011-01-18 17:30:00 -08001572 if (tid->paused)
1573 continue;
Sujithe8324352009-01-16 21:38:42 +05301574
Ben Greear7755bad2011-01-18 17:30:00 -08001575 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301576
Ben Greear7755bad2011-01-18 17:30:00 -08001577 /*
1578 * add tid to round-robin queue if more frames
1579 * are pending for the tid
1580 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001581 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001582 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301583
Ben Greear7755bad2011-01-18 17:30:00 -08001584 if (tid == last_tid ||
1585 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1586 break;
Sujithe8324352009-01-16 21:38:42 +05301587 }
Ben Greear7755bad2011-01-18 17:30:00 -08001588
Felix Fietkaub0477012011-12-14 22:08:05 +01001589 if (!list_empty(&ac->tid_q) && !ac->sched) {
1590 ac->sched = true;
1591 list_add_tail(&ac->list, &txq->axq_acq);
Ben Greear7755bad2011-01-18 17:30:00 -08001592 }
1593
1594 if (ac == last_ac ||
1595 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1596 return;
Sujithe8324352009-01-16 21:38:42 +05301597 }
1598}
1599
Sujithe8324352009-01-16 21:38:42 +05301600/***********/
1601/* TX, DMA */
1602/***********/
1603
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001605 * Insert a chain of ath_buf (descriptors) on a txq and
1606 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607 */
Sujith102e0572008-10-29 10:15:16 +05301608static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001609 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610{
Sujithcbe61d82009-02-09 13:27:12 +05301611 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001612 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001613 struct ath_buf *bf, *bf_last;
1614 bool puttxbuf = false;
1615 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301616
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001617 /*
1618 * Insert the frame on the outbound list and
1619 * pass it on to the hardware.
1620 */
1621
1622 if (list_empty(head))
1623 return;
1624
Felix Fietkaufce041b2011-05-19 12:20:25 +02001625 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001626 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001627 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001628
Joe Perches226afe62010-12-02 19:12:37 -08001629 ath_dbg(common, ATH_DBG_QUEUE,
1630 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631
Felix Fietkaufce041b2011-05-19 12:20:25 +02001632 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1633 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001634 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001635 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001637 list_splice_tail_init(head, &txq->axq_q);
1638
Felix Fietkaufce041b2011-05-19 12:20:25 +02001639 if (txq->axq_link) {
1640 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001641 ath_dbg(common, ATH_DBG_XMIT,
1642 "link[%u] (%p)=%llx (%p)\n",
1643 txq->axq_qnum, txq->axq_link,
1644 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001645 } else if (!edma)
1646 puttxbuf = true;
1647
1648 txq->axq_link = bf_last->bf_desc;
1649 }
1650
1651 if (puttxbuf) {
1652 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1653 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1654 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1655 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1656 }
1657
1658 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001659 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001660 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001662
1663 if (!internal) {
1664 txq->axq_depth++;
1665 if (bf_is_ampdu_not_probing(bf))
1666 txq->axq_ampdu_depth++;
1667 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001668}
1669
Sujithe8324352009-01-16 21:38:42 +05301670static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001671 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301672{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001673 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001674 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001675 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301676
1677 /*
1678 * Do not queue to h/w when any of the following conditions is true:
1679 * - there are pending frames in the software queue
1680 * - the TID is currently paused for an ADDBA/BAR request
1681 * - the seqno is not within the block-ack window
1682 * - the h/w queue depth exceeds the low water mark
1683 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001684 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001685 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001686 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001687 /*
Sujithe8324352009-01-16 21:38:42 +05301688 * Add this frame to software queue for scheduling later
1689 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001690 */
Ben Greearbda8add2011-01-09 23:11:48 -08001691 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001692 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001693 if (!txctl->an || !txctl->an->sleeping)
1694 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301695 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001696 }
1697
Felix Fietkau44f1d262011-08-28 00:32:25 +02001698 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1699 if (!bf)
1700 return;
1701
Felix Fietkau399c6482011-09-14 21:24:17 +02001702 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001703 INIT_LIST_HEAD(&bf_head);
1704 list_add(&bf->list, &bf_head);
1705
Sujithe8324352009-01-16 21:38:42 +05301706 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001707 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301708
1709 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001710 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301711 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001712 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001713 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301714}
1715
Felix Fietkau82b873a2010-11-11 03:18:37 +01001716static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001717 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001718{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001719 struct ath_frame_info *fi = get_frame_info(skb);
1720 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301721 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001722
Felix Fietkau44f1d262011-08-28 00:32:25 +02001723 bf = fi->bf;
1724 if (!bf)
1725 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1726
1727 if (!bf)
1728 return;
1729
1730 INIT_LIST_HEAD(&bf_head);
1731 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001732 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301733
Sujithd43f30152009-01-16 21:38:53 +05301734 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001735 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001736 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301737 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001738}
1739
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001740static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1741 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301742{
1743 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001744 struct ieee80211_sta *sta = tx_info->control.sta;
1745 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001746 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001747 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001748 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001749 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301750
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001751 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301752
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001753 if (sta)
1754 an = (struct ath_node *) sta->drv_priv;
1755
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001756 memset(fi, 0, sizeof(*fi));
1757 if (hw_key)
1758 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001759 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1760 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001761 else
1762 fi->keyix = ATH9K_TXKEYIX_INVALID;
1763 fi->keytype = keytype;
1764 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301765}
1766
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301767u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1768{
1769 struct ath_hw *ah = sc->sc_ah;
1770 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301771 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1772 (curchan->channelFlags & CHANNEL_5GHZ) &&
1773 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301774 return 0x3;
1775 else
1776 return chainmask;
1777}
1778
Felix Fietkau44f1d262011-08-28 00:32:25 +02001779/*
1780 * Assign a descriptor (and a sequence number if necessary)
1781 * and map the buffer for DMA. Frees the skb on error.
1782 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001783static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001784 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001785 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001786 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301787{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001788 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001789 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001790 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001791 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001792 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001793
1794 bf = ath_tx_get_buffer(sc);
1795 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001796 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001797 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001798 }
Sujithe8324352009-01-16 21:38:42 +05301799
Sujithe8324352009-01-16 21:38:42 +05301800 ATH_TXBUF_RESET(bf);
1801
Felix Fietkaufa05f872011-08-28 00:32:24 +02001802 if (tid) {
1803 seqno = tid->seq_next;
1804 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1805 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1806 bf->bf_state.seqno = seqno;
1807 }
1808
Sujithe8324352009-01-16 21:38:42 +05301809 bf->bf_mpdu = skb;
1810
Ben Greearc1739eb32010-10-14 12:45:29 -07001811 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1812 skb->len, DMA_TO_DEVICE);
1813 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301814 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001815 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001816 ath_err(ath9k_hw_common(sc->sc_ah),
1817 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001818 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001819 goto error;
Sujithe8324352009-01-16 21:38:42 +05301820 }
1821
Felix Fietkau56dc6332011-08-28 00:32:22 +02001822 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001823
1824 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001825
1826error:
1827 dev_kfree_skb_any(skb);
1828 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001829}
1830
1831/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001832static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001833 struct ath_tx_control *txctl)
1834{
Felix Fietkau04caf862010-11-14 15:20:12 +01001835 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1836 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001837 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001838 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001839 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301840
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301841 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1842 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001843 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1844 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001845 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001846
Felix Fietkau066dae92010-11-07 14:59:39 +01001847 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001848 }
1849
1850 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001851 /*
1852 * Try aggregation if it's a unicast data frame
1853 * and the destination is HT capable.
1854 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001855 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301856 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001857 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1858 if (!bf)
Felix Fietkau3ad29522011-12-14 22:08:07 +01001859 return;
Felix Fietkau04caf862010-11-14 15:20:12 +01001860
Felix Fietkau82b873a2010-11-11 03:18:37 +01001861 bf->bf_state.bfs_paprd = txctl->paprd;
1862
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301863 if (txctl->paprd)
1864 bf->bf_state.bfs_paprd_timestamp = jiffies;
1865
Felix Fietkau44f1d262011-08-28 00:32:25 +02001866 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301867 }
Sujithe8324352009-01-16 21:38:42 +05301868}
1869
1870/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001871int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301872 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001873{
Felix Fietkau28d16702010-11-14 15:20:10 +01001874 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1875 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001876 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001877 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001878 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001879 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001880 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001881 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001882 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001883
Ben Greeara9927ba2010-12-06 21:13:49 -08001884 /* NOTE: sta can be NULL according to net/mac80211.h */
1885 if (sta)
1886 txctl->an = (struct ath_node *)sta->drv_priv;
1887
Felix Fietkau04caf862010-11-14 15:20:12 +01001888 if (info->control.hw_key)
1889 frmlen += info->control.hw_key->icv_len;
1890
Felix Fietkau28d16702010-11-14 15:20:10 +01001891 /*
1892 * As a temporary workaround, assign seq# here; this will likely need
1893 * to be cleaned up to work better with Beacon transmission and virtual
1894 * BSSes.
1895 */
1896 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1897 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1898 sc->tx.seq_no += 0x10;
1899 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1900 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1901 }
1902
John W. Linville42cecc32011-09-19 15:42:31 -04001903 /* Add the padding after the header if this is not already done */
1904 padpos = ath9k_cmn_padpos(hdr->frame_control);
1905 padsize = padpos & 3;
1906 if (padsize && skb->len > padpos) {
1907 if (skb_headroom(skb) < padsize)
1908 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001909
John W. Linville42cecc32011-09-19 15:42:31 -04001910 skb_push(skb, padsize);
1911 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001912 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001913 }
1914
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001915 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1916 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1917 !ieee80211_is_data(hdr->frame_control))
1918 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1919
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001920 setup_frame_info(hw, skb, frmlen);
1921
1922 /*
1923 * At this point, the vif, hw_key and sta pointers in the tx control
1924 * info are no longer valid (overwritten by the ath_frame_info data).
1925 */
1926
Felix Fietkau066dae92010-11-07 14:59:39 +01001927 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001928 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001929 if (txq == sc->tx.txq_map[q] &&
1930 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001931 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001932 txq->stopped = 1;
1933 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001934
Felix Fietkau44f1d262011-08-28 00:32:25 +02001935 ath_tx_start_dma(sc, skb, txctl);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001936
1937 spin_unlock_bh(&txq->axq_lock);
1938
Felix Fietkau44f1d262011-08-28 00:32:25 +02001939 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001940}
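
/*
 * Illustrative sketch only, not part of this file: the minimal calling
 * convention for ath_tx_start() from a mac80211 .tx handler.  As noted
 * above, the caller owns (and must free) the skb when ath_tx_start()
 * fails.  The example_* name is hypothetical.
 */
#if 0
static void example_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(txctl));
	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];

	if (ath_tx_start(hw, skb, &txctl))
		dev_kfree_skb_any(skb);
}
#endif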
1941
Sujithe8324352009-01-16 21:38:42 +05301942/*****************/
1943/* TX Completion */
1944/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001945
Sujithe8324352009-01-16 21:38:42 +05301946static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301947 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001948{
Sujithe8324352009-01-16 21:38:42 +05301949 struct ieee80211_hw *hw = sc->hw;
1950 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001951 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001952 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001953 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301954
Joe Perches226afe62010-12-02 19:12:37 -08001955 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301956
Felix Fietkau55797b12011-09-14 21:24:16 +02001957 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301958 /* Frame was ACKed */
1959 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301960
John W. Linville42cecc32011-09-19 15:42:31 -04001961 padpos = ath9k_cmn_padpos(hdr->frame_control);
1962 padsize = padpos & 3;
1963 if (padsize && skb->len>padpos+padsize) {
1964 /*
1965 * Remove MAC header padding before giving the frame back to
1966 * mac80211.
1967 */
1968 memmove(skb->data + padsize, skb->data, padpos);
1969 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301970 }
1971
Felix Fietkauc8e88682011-11-16 13:08:40 +01001972 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05301973 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001974 ath_dbg(common, ATH_DBG_PS,
1975 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301976 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1977 PS_WAIT_FOR_CAB |
1978 PS_WAIT_FOR_PSPOLL_DATA |
1979 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001980 }
1981
Felix Fietkau7545daf2011-01-24 19:23:16 +01001982 q = skb_get_queue_mapping(skb);
1983 if (txq == sc->tx.txq_map[q]) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001984 if (WARN_ON(--txq->pending_frames < 0))
1985 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001986
Felix Fietkau7545daf2011-01-24 19:23:16 +01001987 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1988 ieee80211_wake_queue(sc->hw, q);
1989 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001990 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001991 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001992
1993 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301994}
1995
1996static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001997 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01001998 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301999{
2000 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002001 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302002 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302003 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302004
Felix Fietkau55797b12011-09-14 21:24:16 +02002005 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302006 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302007
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002008 if (ts->ts_status & ATH9K_TXERR_FILT)
2009 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2010
Ben Greearc1739eb32010-10-14 12:45:29 -07002011 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002012 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002013
2014 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302015 if (time_after(jiffies,
2016 bf->bf_state.bfs_paprd_timestamp +
2017 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002018 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002019 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002020 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002021 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002022 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302023 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002024 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002025	/* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
2026 * accidentally reference it later.
2027 */
2028 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302029
2030 /*
2031 * Return the list of ath_bufs of this mpdu to the free queue
2032 */
2033 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2034 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2035 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2036}
2037
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002038static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2039 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002040 int txok)
Sujithc4288392008-11-18 09:09:30 +05302041{
Sujitha22be222009-03-30 15:28:36 +05302042 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302043 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302044 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002045 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002046 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302047 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302048
Sujith95e4acb2009-03-13 08:56:09 +05302049 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002050 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302051
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002052 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302053 WARN_ON(tx_rateindex >= hw->max_rates);
2054
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002055 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002056 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302057
Felix Fietkaub572d032010-11-14 15:20:07 +01002058 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002059 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302060 tx_info->status.ampdu_len = nframes;
2061 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002062
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002063 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002064 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002065 /*
2066 * If an underrun error is seen, treat it as an excessive
2067 * retry only if max frame trigger level has been reached
2068 * (2 KB for single stream, and 4 KB for dual stream).
2069 * Adjust the long retry as if the frame was tried
2070 * hw->max_rate_tries times to affect how rate control updates
2071 * PER for the failed rate.
2072 * In case of congestion on the bus penalizing this type of
2073 * underruns should help hardware actually transmit new frames
2074 * successfully by eventually preferring slower rates.
2075 * This itself should also alleviate congestion on the bus.
2076 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002077 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2078 ATH9K_TX_DELIM_UNDERRUN)) &&
2079 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002080 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002081 tx_info->status.rates[tx_rateindex].count =
2082 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302083 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302084
Felix Fietkau545750d2009-11-23 22:21:01 +01002085 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302086 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002087 tx_info->status.rates[i].idx = -1;
2088 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302089
Felix Fietkau78c46532010-06-25 01:26:16 +02002090 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302091}
2092
Felix Fietkaufce041b2011-05-19 12:20:25 +02002093static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2094 struct ath_tx_status *ts, struct ath_buf *bf,
2095 struct list_head *bf_head)
2096{
2097 int txok;
2098
2099 txq->axq_depth--;
2100 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2101 txq->axq_tx_inprogress = false;
2102 if (bf_is_ampdu_not_probing(bf))
2103 txq->axq_ampdu_depth--;
2104
Felix Fietkaufce041b2011-05-19 12:20:25 +02002105 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002106 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002107 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002108 } else
2109 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2110
Felix Fietkaufce041b2011-05-19 12:20:25 +02002111 if (sc->sc_flags & SC_OP_TXAGGR)
2112 ath_txq_schedule(sc, txq);
2113}
2114
Sujithc4288392008-11-18 09:09:30 +05302115static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116{
Sujithcbe61d82009-02-09 13:27:12 +05302117 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002118 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002119 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2120 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302121 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002122 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 int status;
2124
Joe Perches226afe62010-12-02 19:12:37 -08002125 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2126 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2127 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128
Felix Fietkaufce041b2011-05-19 12:20:25 +02002129 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002130 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002131 if (work_pending(&sc->hw_reset_work))
2132 break;
2133
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134 if (list_empty(&txq->axq_q)) {
2135 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002136 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002137 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 break;
2139 }
2140 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2141
2142 /*
2143 * There is a race condition in which a BH gets scheduled
2144 * after sw writes TxE and before hw reloads the last
2145 * descriptor to get the newly chained one.
2146 * Software must keep the last DONE descriptor as a
2147 * holding descriptor - software does so by marking
2148 * it with the STALE flag.
2149 */
2150 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302151 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002152 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002153 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002155
2156 bf = list_entry(bf_held->list.next, struct ath_buf,
2157 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158 }
2159
2160 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302161 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162
Felix Fietkau29bffa92010-03-29 20:14:23 -07002163 memset(&ts, 0, sizeof(ts));
2164 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002165 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002167
Ben Greear2dac4fb2011-01-09 23:11:45 -08002168 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169
2170 /*
2171 * Remove ath_bufs of the same transmit unit from the txq,
2172 * but leave the last descriptor behind as the holding
2173 * descriptor for hw.
2174 */
Sujitha119cc42009-03-30 15:28:38 +05302175 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 if (!list_is_singular(&lastbf->list))
2178 list_cut_position(&bf_head,
2179 &txq->axq_q, lastbf->list.prev);
2180
Felix Fietkaufce041b2011-05-19 12:20:25 +02002181 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002182 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002183 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184 }
Johannes Berge6a98542008-10-21 12:40:02 +02002185
Felix Fietkaufce041b2011-05-19 12:20:25 +02002186 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002187 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002188 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002189}
2190
Sujith305fe472009-07-23 15:32:29 +05302191static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002192{
2193 struct ath_softc *sc = container_of(work, struct ath_softc,
2194 tx_complete_work.work);
2195 struct ath_txq *txq;
2196 int i;
2197 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002198#ifdef CONFIG_ATH9K_DEBUGFS
2199 sc->tx_complete_poll_work_seen++;
2200#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002201
2202 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2203 if (ATH_TXQ_SETUP(sc, i)) {
2204 txq = &sc->tx.txq[i];
2205 spin_lock_bh(&txq->axq_lock);
2206 if (txq->axq_depth) {
2207 if (txq->axq_tx_inprogress) {
2208 needreset = true;
2209 spin_unlock_bh(&txq->axq_lock);
2210 break;
2211 } else {
2212 txq->axq_tx_inprogress = true;
2213 }
2214 }
2215 spin_unlock_bh(&txq->axq_lock);
2216 }
2217
2218 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002219 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2220 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002221 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002222 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002223 }
2224
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002225 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002226 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2227}
2228
2229
Sujithe8324352009-01-16 21:38:42 +05302230
2231void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002232{
Sujithe8324352009-01-16 21:38:42 +05302233 int i;
2234 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002235
Sujithe8324352009-01-16 21:38:42 +05302236 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002237
2238 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302239 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2240 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002241 }
2242}
2243
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002244void ath_tx_edma_tasklet(struct ath_softc *sc)
2245{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002246 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002247 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2248 struct ath_hw *ah = sc->sc_ah;
2249 struct ath_txq *txq;
2250 struct ath_buf *bf, *lastbf;
2251 struct list_head bf_head;
2252 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002253
2254 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002255 if (work_pending(&sc->hw_reset_work))
2256 break;
2257
Felix Fietkaufce041b2011-05-19 12:20:25 +02002258 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002259 if (status == -EINPROGRESS)
2260 break;
2261 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002262 ath_dbg(common, ATH_DBG_XMIT,
2263 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 break;
2265 }
2266
2267 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002268 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002269 continue;
2270
Felix Fietkaufce041b2011-05-19 12:20:25 +02002271 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002272
2273 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002274
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002275 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2276 spin_unlock_bh(&txq->axq_lock);
2277 return;
2278 }
2279
2280 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2281 struct ath_buf, list);
2282 lastbf = bf->bf_lastbf;
2283
2284 INIT_LIST_HEAD(&bf_head);
2285 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2286 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287
Felix Fietkaufce041b2011-05-19 12:20:25 +02002288 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2289 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002290
Felix Fietkaufce041b2011-05-19 12:20:25 +02002291 if (!list_empty(&txq->axq_q)) {
2292 struct list_head bf_q;
2293
2294 INIT_LIST_HEAD(&bf_q);
2295 txq->axq_link = NULL;
2296 list_splice_tail_init(&txq->axq_q, &bf_q);
2297 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2298 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002299 }
2300
Felix Fietkaufce041b2011-05-19 12:20:25 +02002301 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002302 spin_unlock_bh(&txq->axq_lock);
2303 }
2304}
2305
Sujithe8324352009-01-16 21:38:42 +05302306/*****************/
2307/* Init, Cleanup */
2308/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002309
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002310static int ath_txstatus_setup(struct ath_softc *sc, int size)
2311{
2312 struct ath_descdma *dd = &sc->txsdma;
2313 u8 txs_len = sc->sc_ah->caps.txs_len;
2314
2315 dd->dd_desc_len = size * txs_len;
2316 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2317 &dd->dd_desc_paddr, GFP_KERNEL);
2318 if (!dd->dd_desc)
2319 return -ENOMEM;
2320
2321 return 0;
2322}
2323
2324static int ath_tx_edma_init(struct ath_softc *sc)
2325{
2326 int err;
2327
2328 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2329 if (!err)
2330 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2331 sc->txsdma.dd_desc_paddr,
2332 ATH_TXSTATUS_RING_SIZE);
2333
2334 return err;
2335}
2336
2337static void ath_tx_edma_cleanup(struct ath_softc *sc)
2338{
2339 struct ath_descdma *dd = &sc->txsdma;
2340
2341 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2342 dd->dd_desc_paddr);
2343}
2344
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345int ath_tx_init(struct ath_softc *sc, int nbufs)
2346{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002347 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348 int error = 0;
2349
Sujith797fe5cb2009-03-30 15:28:45 +05302350 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351
Sujith797fe5cb2009-03-30 15:28:45 +05302352 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002353 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002355 ath_err(common,
2356 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302357 goto err;
2358 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359
Sujith797fe5cb2009-03-30 15:28:45 +05302360 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002361 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302362 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002363 ath_err(common,
2364 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302365 goto err;
2366 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002368 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2369
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002370 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2371 error = ath_tx_edma_init(sc);
2372 if (error)
2373 goto err;
2374 }
2375
Sujith797fe5cb2009-03-30 15:28:45 +05302376err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377 if (error != 0)
2378 ath_tx_cleanup(sc);
2379
2380 return error;
2381}
2382
Sujith797fe5cb2009-03-30 15:28:45 +05302383void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384{
Sujithb77f4832008-12-07 21:44:03 +05302385 if (sc->beacon.bdma.dd_desc_len != 0)
2386 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387
Sujithb77f4832008-12-07 21:44:03 +05302388 if (sc->tx.txdma.dd_desc_len != 0)
2389 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002390
2391 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2392 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393}
2394
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002395void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2396{
Sujithc5170162008-10-29 10:13:59 +05302397 struct ath_atx_tid *tid;
2398 struct ath_atx_ac *ac;
2399 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002400
Sujith8ee5afb2008-12-07 21:43:36 +05302401 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302402 tidno < WME_NUM_TID;
2403 tidno++, tid++) {
2404 tid->an = an;
2405 tid->tidno = tidno;
2406 tid->seq_start = tid->seq_next = 0;
2407 tid->baw_size = WME_MAX_BA;
2408 tid->baw_head = tid->baw_tail = 0;
2409 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302410 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302411 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002412 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302413 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302414 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302415 tid->state &= ~AGGR_ADDBA_COMPLETE;
2416 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302417 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418
Sujith8ee5afb2008-12-07 21:43:36 +05302419 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302420 acno < WME_NUM_AC; acno++, ac++) {
2421 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002422 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302423 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424 }
2425}
2426
Sujithb5aa9bf2008-10-29 10:13:31 +05302427void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428{
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 struct ath_atx_ac *ac;
2430 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002432 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302433
Felix Fietkau2b409942010-07-07 19:42:08 +02002434 for (tidno = 0, tid = &an->tid[tidno];
2435 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436
Felix Fietkau2b409942010-07-07 19:42:08 +02002437 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002438 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002439
Felix Fietkau2b409942010-07-07 19:42:08 +02002440 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441
Felix Fietkau2b409942010-07-07 19:42:08 +02002442 if (tid->sched) {
2443 list_del(&tid->list);
2444 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002445 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002446
2447 if (ac->sched) {
2448 list_del(&ac->list);
2449 tid->ac->sched = false;
2450 }
2451
2452 ath_tid_drain(sc, txq, tid);
2453 tid->state &= ~AGGR_ADDBA_COMPLETE;
2454 tid->state &= ~AGGR_CLEANUP;
2455
2456 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002457 }
2458}