/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952, 32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852, 64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532, 65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172, 35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288, 65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532, 65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140, 65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532, 65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532, 65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532, 65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532, 65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532, 65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

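/*
 * Insert a TID into its access category's schedule list and, if needed,
 * the access category into the hardware queue's schedule list, so that
 * ath_txq_schedule() will eventually service it.
 */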
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	spin_unlock_bh(&txq->axq_lock);

	if (sendbar)
		ath_send_bar(tid, tid->seq_start);
}

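/*
 * Mark the given sequence number as completed in the block-ack window
 * and slide the window start past any leading subframes that have
 * already been acknowledged.
 */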
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

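/* Track a newly queued subframe's sequence number in the block-ack window. */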
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

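/* Take a descriptor/buffer pair from the free tx buffer list, if one is available. */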
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

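/*
 * Allocate a fresh ath_buf that refers to the same skb, DMA address and
 * descriptor contents as @bf; used when the last descriptor of an
 * aggregate is a stale holding descriptor and must be retransmitted.
 */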
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

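/*
 * Walk an (aggregate) frame chain and report how many subframes were sent
 * and how many of them are missing from the block-ack bitmap.
 */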
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


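/*
 * Handle tx completion of an aggregate: update the block-ack window,
 * complete acked subframes, software-retry or fail the unacked ones and
 * send a BAR if the receiver's window needs to be moved forward.
 */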
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu,
							 retries);

				txpending = 1;
			} else {
				txfail = 1;
				txfail_cnt++;
				bar_index = max_t(int, bar_index,
					ATH_BA_INDEX(seq_first, seqno));
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0);
						bar_index = max_t(int, bar_index,
							ATH_BA_INDEX(seq_first,
								seqno));
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	if (bar_index >= 0)
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

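/*
 * Compute the byte limit for an aggregate so that, at the rates currently
 * selected for this frame, its transmit duration stays within roughly 4 ms.
 */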
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

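/*
 * Pull frames from the TID's software queue and link them into a single
 * A-MPDU, stopping at the block-ack window, the aggregate size limit or
 * the subframe limit.
 */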
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

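/* Fill in the rate series, RTS/CTS flags and packet durations for a frame. */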
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

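/*
 * Program the hardware descriptors for a frame or an aggregate chain,
 * including rate series, buffer addresses and aggregation fields.
 */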
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

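/*
 * Form and queue as many aggregates as possible from this TID, until the
 * hardware queue is sufficiently filled or the block-ack window closes.
 */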
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

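/*
 * Called when an A-MPDU tx session is started for a TID: reset the
 * block-ack window state and keep the TID paused until the ADDBA
 * exchange completes and ath_tx_aggr_resume() is called.
 */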
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

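/*
 * The station went to sleep: unschedule all of its TIDs and tell
 * mac80211 which TIDs still have buffered frames.
 */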
Johannes Berg042ec452011-09-29 16:04:26 +02001190void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1191 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001192{
1193 struct ath_atx_tid *tid;
1194 struct ath_atx_ac *ac;
1195 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001196 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001197 int tidno;
1198
1199 for (tidno = 0, tid = &an->tid[tidno];
1200 tidno < WME_NUM_TID; tidno++, tid++) {
1201
1202 if (!tid->sched)
1203 continue;
1204
1205 ac = tid->ac;
1206 txq = ac->txq;
1207
1208 spin_lock_bh(&txq->axq_lock);
1209
Johannes Berg042ec452011-09-29 16:04:26 +02001210 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001211
1212 tid->sched = false;
1213 list_del(&tid->list);
1214
1215 if (ac->sched) {
1216 ac->sched = false;
1217 list_del(&ac->list);
1218 }
1219
1220 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001221
Johannes Berg042ec452011-09-29 16:04:26 +02001222 ieee80211_sta_set_buffered(sta, tidno, buffered);
1223 }
Felix Fietkau55195412011-04-17 23:28:09 +02001224}
1225
1226void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1227{
1228 struct ath_atx_tid *tid;
1229 struct ath_atx_ac *ac;
1230 struct ath_txq *txq;
1231 int tidno;
1232
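	/*
	 * Mark each AC so the PS filter is cleared for its next frame, and
	 * re-schedule every TID that still has buffered frames and is not
	 * paused.
	 */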
1233 for (tidno = 0, tid = &an->tid[tidno];
1234 tidno < WME_NUM_TID; tidno++, tid++) {
1235
1236 ac = tid->ac;
1237 txq = ac->txq;
1238
1239 spin_lock_bh(&txq->axq_lock);
1240 ac->clear_ps_filter = true;
1241
Felix Fietkau56dc6332011-08-28 00:32:22 +02001242 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001243 ath_tx_queue_tid(txq, tid);
1244 ath_txq_schedule(sc, txq);
1245 }
1246
1247 spin_unlock_bh(&txq->axq_lock);
1248 }
1249}
1250
Sujithe8324352009-01-16 21:38:42 +05301251void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1252{
1253 struct ath_atx_tid *txtid;
1254 struct ath_node *an;
1255
1256 an = (struct ath_node *)sta->drv_priv;
1257
1258 if (sc->sc_flags & SC_OP_TXAGGR) {
1259 txtid = ATH_AN_2_TID(an, tid);
1260 txtid->baw_size =
1261 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1262 txtid->state |= AGGR_ADDBA_COMPLETE;
1263 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1264 ath_tx_resume_tid(sc, txtid);
1265 }
1266}
1267
Sujithe8324352009-01-16 21:38:42 +05301268/********************/
1269/* Queue Management */
1270/********************/
1271
Sujithe8324352009-01-16 21:38:42 +05301272static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1273 struct ath_txq *txq)
1274{
1275 struct ath_atx_ac *ac, *ac_tmp;
1276 struct ath_atx_tid *tid, *tid_tmp;
1277
1278 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1279 list_del(&ac->list);
1280 ac->sched = false;
1281 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1282 list_del(&tid->list);
1283 tid->sched = false;
1284 ath_tid_drain(sc, txq, tid);
1285 }
1286 }
1287}
1288
1289struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1290{
Sujithcbe61d82009-02-09 13:27:12 +05301291 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301292 struct ath9k_tx_queue_info qi;
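	/* Map mac80211 WME access categories to the hardware queue subtypes. */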
Felix Fietkau066dae92010-11-07 14:59:39 +01001293 static const int subtype_txq_to_hwq[] = {
1294 [WME_AC_BE] = ATH_TXQ_AC_BE,
1295 [WME_AC_BK] = ATH_TXQ_AC_BK,
1296 [WME_AC_VI] = ATH_TXQ_AC_VI,
1297 [WME_AC_VO] = ATH_TXQ_AC_VO,
1298 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001299 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301300
1301 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001302 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301303 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1304 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1305 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1306 qi.tqi_physCompBuf = 0;
1307
1308 /*
1309 * Enable interrupts only for EOL and DESC conditions.
1310 * We mark tx descriptors to receive a DESC interrupt
1311 * when a tx queue gets deep; otherwise we wait for the
1312 * EOL to reap descriptors. Note that this is done to
1313 * reduce interrupt load, and it only defers reaping
1314 * descriptors, never transmitting frames. Aside from
1315 * reducing interrupts, this also permits more concurrency.
1316 * The only potential downside is if the tx queue backs
1317 * up, in which case the top half of the kernel may back up
1318 * due to a lack of tx descriptors.
1319 *
1320 * The UAPSD queue is an exception, since we take a desc-
1321 * based intr on the EOSP frames.
1322 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001323 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1324 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1325 TXQ_FLAG_TXERRINT_ENABLE;
1326 } else {
1327 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1328 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1329 else
1330 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1331 TXQ_FLAG_TXDESCINT_ENABLE;
1332 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001333 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1334 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301335 /*
1336 * NB: don't print a message, this happens
1337 * normally on parts with too few tx queues
1338 */
1339 return NULL;
1340 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001341 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1342 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301343
Ben Greear60f2d1d2011-01-09 23:11:52 -08001344 txq->axq_qnum = axq_qnum;
1345 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301346 txq->axq_link = NULL;
1347 INIT_LIST_HEAD(&txq->axq_q);
1348 INIT_LIST_HEAD(&txq->axq_acq);
1349 spin_lock_init(&txq->axq_lock);
1350 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001351 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001352 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001353 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001354
1355 txq->txq_headidx = txq->txq_tailidx = 0;
1356 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1357 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301358 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001359 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301360}
1361
Sujithe8324352009-01-16 21:38:42 +05301362int ath_txq_update(struct ath_softc *sc, int qnum,
1363 struct ath9k_tx_queue_info *qinfo)
1364{
Sujithcbe61d82009-02-09 13:27:12 +05301365 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301366 int error = 0;
1367 struct ath9k_tx_queue_info qi;
1368
1369 if (qnum == sc->beacon.beaconq) {
1370 /*
1371 * XXX: for beacon queue, we just save the parameter.
1372 * It will be picked up by ath_beaconq_config when
1373 * it's necessary.
1374 */
1375 sc->beacon.beacon_qi = *qinfo;
1376 return 0;
1377 }
1378
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001379 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301380
1381 ath9k_hw_get_txq_props(ah, qnum, &qi);
1382 qi.tqi_aifs = qinfo->tqi_aifs;
1383 qi.tqi_cwmin = qinfo->tqi_cwmin;
1384 qi.tqi_cwmax = qinfo->tqi_cwmax;
1385 qi.tqi_burstTime = qinfo->tqi_burstTime;
1386 qi.tqi_readyTime = qinfo->tqi_readyTime;
1387
1388 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001389 ath_err(ath9k_hw_common(sc->sc_ah),
1390 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301391 error = -EIO;
1392 } else {
1393 ath9k_hw_resettxqueue(ah, qnum);
1394 }
1395
1396 return error;
1397}
1398
1399int ath_cabq_update(struct ath_softc *sc)
1400{
1401 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001402 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301403 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301404
1405 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1406 /*
1407 * Ensure the readytime % is within the bounds.
1408 */
Sujith17d79042009-02-09 13:27:03 +05301409 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1410 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1411 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1412 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301413
Steve Brown9814f6b2011-02-07 17:10:39 -07001414 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301415 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301416 ath_txq_update(sc, qnum, &qi);
1417
1418 return 0;
1419}
1420
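/*
 * Only A-MPDU frames that are not rate-control probes count towards
 * axq_ampdu_depth, so probing frames never hold back aggregate scheduling.
 */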
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001421static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1422{
1423 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1424 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1425}
1426
Felix Fietkaufce041b2011-05-19 12:20:25 +02001427static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1428 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301429 __releases(txq->axq_lock)
1430 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301431{
1432 struct ath_buf *bf, *lastbf;
1433 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001434 struct ath_tx_status ts;
1435
1436 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001437 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301438 INIT_LIST_HEAD(&bf_head);
1439
Felix Fietkaufce041b2011-05-19 12:20:25 +02001440 while (!list_empty(list)) {
1441 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301442
Felix Fietkaufce041b2011-05-19 12:20:25 +02001443 if (bf->bf_stale) {
1444 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301445
Felix Fietkaufce041b2011-05-19 12:20:25 +02001446 ath_tx_return_buffer(sc, bf);
1447 continue;
Sujithe8324352009-01-16 21:38:42 +05301448 }
1449
1450 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001451 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001452
Sujithe8324352009-01-16 21:38:42 +05301453 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001454 if (bf_is_ampdu_not_probing(bf))
1455 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301456
Felix Fietkaufce041b2011-05-19 12:20:25 +02001457 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301458 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001459 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1460 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301461 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001462 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001463 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001464 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001465}
1466
1467/*
1468 * Drain a given TX queue (could be Beacon or Data)
1469 *
1470 * This assumes output has been stopped and
1471 * we do not need to block ath_tx_tasklet.
1472 */
1473void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1474{
1475 spin_lock_bh(&txq->axq_lock);
1476 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1477 int idx = txq->txq_tailidx;
1478
1479 while (!list_empty(&txq->txq_fifo[idx])) {
1480 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1481 retry_tx);
1482
1483 INCR(idx, ATH_TXFIFO_DEPTH);
1484 }
1485 txq->txq_tailidx = idx;
1486 }
1487
1488 txq->axq_link = NULL;
1489 txq->axq_tx_inprogress = false;
1490 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001491
1492 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001493 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1494 ath_txq_drain_pending_buffers(sc, txq);
1495
1496 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301497}
1498
Felix Fietkau080e1a22010-12-05 20:17:53 +01001499bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301500{
Sujithcbe61d82009-02-09 13:27:12 +05301501 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001502 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301503 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001504 int i;
1505 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301506
1507 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001508 return true;
Sujith043a0402009-01-16 21:38:47 +05301509
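	/* Stop DMA on all hardware TX queues before draining them. */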
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001510 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301511
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001512 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301513 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001514 if (!ATH_TXQ_SETUP(sc, i))
1515 continue;
1516
Felix Fietkau34d25812011-10-07 02:28:12 +02001517 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1518 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301519 }
1520
Felix Fietkau080e1a22010-12-05 20:17:53 +01001521 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001522 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301523
1524 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001525 if (!ATH_TXQ_SETUP(sc, i))
1526 continue;
1527
1528 /*
1529 * The caller will resume queues with ieee80211_wake_queues.
1530 * Mark the queue as not stopped to prevent ath_tx_complete
1531 * from waking the queue too early.
1532 */
1533 txq = &sc->tx.txq[i];
1534 txq->stopped = false;
1535 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301536 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001537
1538 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301539}
1540
Sujithe8324352009-01-16 21:38:42 +05301541void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1542{
1543 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1544 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1545}
1546
Ben Greear7755bad2011-01-18 17:30:00 -08001547/* For each axq_acq entry, for each tid, try to schedule packets
1548 * for transmit until axq_ampdu_depth has reached ATH_AGGR_MIN_QDEPTH.
1549 */
Sujithe8324352009-01-16 21:38:42 +05301550void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1551{
Ben Greear7755bad2011-01-18 17:30:00 -08001552 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1553 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301554
Felix Fietkau236de512011-09-03 01:40:25 +02001555 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001556 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301557 return;
1558
1559 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001560 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301561
Ben Greear7755bad2011-01-18 17:30:00 -08001562 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1563 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1564 list_del(&ac->list);
1565 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301566
Ben Greear7755bad2011-01-18 17:30:00 -08001567 while (!list_empty(&ac->tid_q)) {
1568 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1569 list);
1570 list_del(&tid->list);
1571 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301572
Ben Greear7755bad2011-01-18 17:30:00 -08001573 if (tid->paused)
1574 continue;
Sujithe8324352009-01-16 21:38:42 +05301575
Ben Greear7755bad2011-01-18 17:30:00 -08001576 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301577
Ben Greear7755bad2011-01-18 17:30:00 -08001578 /*
1579 * add tid to round-robin queue if more frames
1580 * are pending for the tid
1581 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001582 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001583 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301584
Ben Greear7755bad2011-01-18 17:30:00 -08001585 if (tid == last_tid ||
1586 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1587 break;
Sujithe8324352009-01-16 21:38:42 +05301588 }
Ben Greear7755bad2011-01-18 17:30:00 -08001589
1590 if (!list_empty(&ac->tid_q)) {
1591 if (!ac->sched) {
1592 ac->sched = true;
1593 list_add_tail(&ac->list, &txq->axq_acq);
1594 }
1595 }
1596
1597 if (ac == last_ac ||
1598 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1599 return;
Sujithe8324352009-01-16 21:38:42 +05301600 }
1601}
1602
Sujithe8324352009-01-16 21:38:42 +05301603/***********/
1604/* TX, DMA */
1605/***********/
1606
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001608 * Insert a chain of ath_buf (descriptors) on a txq; the caller is
1609 * assumed to have already chained the descriptors together.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610 */
Sujith102e0572008-10-29 10:15:16 +05301611static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001612 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001613{
Sujithcbe61d82009-02-09 13:27:12 +05301614 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001615 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001616 struct ath_buf *bf, *bf_last;
1617 bool puttxbuf = false;
1618 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301619
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001620 /*
1621 * Insert the frame on the outbound list and
1622 * pass it on to the hardware.
1623 */
1624
1625 if (list_empty(head))
1626 return;
1627
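	/*
	 * EDMA hardware takes frames through a per-queue TX FIFO; legacy
	 * hardware gets the new chain linked behind axq_q (or a fresh TXDP
	 * write when the queue was idle) followed by a TxE kick.
	 */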
Felix Fietkaufce041b2011-05-19 12:20:25 +02001628 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001629 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001630 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631
Joe Perches226afe62010-12-02 19:12:37 -08001632 ath_dbg(common, ATH_DBG_QUEUE,
1633 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001634
Felix Fietkaufce041b2011-05-19 12:20:25 +02001635 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1636 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001637 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001638 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001639 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001640 list_splice_tail_init(head, &txq->axq_q);
1641
Felix Fietkaufce041b2011-05-19 12:20:25 +02001642 if (txq->axq_link) {
1643 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001644 ath_dbg(common, ATH_DBG_XMIT,
1645 "link[%u] (%p)=%llx (%p)\n",
1646 txq->axq_qnum, txq->axq_link,
1647 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001648 } else if (!edma)
1649 puttxbuf = true;
1650
1651 txq->axq_link = bf_last->bf_desc;
1652 }
1653
1654 if (puttxbuf) {
1655 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1656 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1657 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1658 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1659 }
1660
1661 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001662 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001663 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001664 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001665
1666 if (!internal) {
1667 txq->axq_depth++;
1668 if (bf_is_ampdu_not_probing(bf))
1669 txq->axq_ampdu_depth++;
1670 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001671}
1672
Sujithe8324352009-01-16 21:38:42 +05301673static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001674 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301675{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001676 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001677 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001678 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301679
1680 /*
1681 * Do not queue to h/w when any of the following conditions is true:
1682 * - there are pending frames in software queue
1683 * - the TID is currently paused for ADDBA/BAR request
1684 * - seqno is not within block-ack window
1685 * - h/w queue depth exceeds low water mark
1686 */
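	/*
	 * BAW_WITHIN() checks window membership modulo the 4096-entry 802.11
	 * sequence space; e.g. with seq_start 4090 and baw_size 64, a seqno
	 * of 5 is still inside the window since (5 - 4090) mod 4096 = 11 < 64.
	 */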
Felix Fietkau56dc6332011-08-28 00:32:22 +02001687 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001688 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001689 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001690 /*
Sujithe8324352009-01-16 21:38:42 +05301691 * Add this frame to software queue for scheduling later
1692 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001693 */
Ben Greearbda8add2011-01-09 23:11:48 -08001694 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001695 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001696 if (!txctl->an || !txctl->an->sleeping)
1697 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301698 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001699 }
1700
Felix Fietkau44f1d262011-08-28 00:32:25 +02001701 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1702 if (!bf)
1703 return;
1704
Felix Fietkau399c6482011-09-14 21:24:17 +02001705 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001706 INIT_LIST_HEAD(&bf_head);
1707 list_add(&bf->list, &bf_head);
1708
Sujithe8324352009-01-16 21:38:42 +05301709 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001710 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301711
1712 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001713 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301714 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001715 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001716 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301717}
1718
Felix Fietkau82b873a2010-11-11 03:18:37 +01001719static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001720 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001721{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001722 struct ath_frame_info *fi = get_frame_info(skb);
1723 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301724 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001725
Felix Fietkau44f1d262011-08-28 00:32:25 +02001726 bf = fi->bf;
1727 if (!bf)
1728 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1729
1730 if (!bf)
1731 return;
1732
1733 INIT_LIST_HEAD(&bf_head);
1734 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001735 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301736
1737 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001738 if (tid)
1739 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301740
Sujithd43f30152009-01-16 21:38:53 +05301741 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001742 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001743 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301744 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001745}
1746
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001747static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1748 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301749{
1750 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001751 struct ieee80211_sta *sta = tx_info->control.sta;
1752 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001753 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001754 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001755 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001756 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301757
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001758 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301759
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001760 if (sta)
1761 an = (struct ath_node *) sta->drv_priv;
1762
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001763 memset(fi, 0, sizeof(*fi));
1764 if (hw_key)
1765 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001766 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1767 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001768 else
1769 fi->keyix = ATH9K_TXKEYIX_INVALID;
1770 fi->keytype = keytype;
1771 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301772}
1773
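/*
 * On 5 GHz channels, hardware that advertises the APM capability drops the
 * TX chainmask from three chains (0x7) to two (0x3) for lower rate codes,
 * presumably to stay within per-chain power limits.
 */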
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301774u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1775{
1776 struct ath_hw *ah = sc->sc_ah;
1777 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301778 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1779 (curchan->channelFlags & CHANNEL_5GHZ) &&
1780 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301781 return 0x3;
1782 else
1783 return chainmask;
1784}
1785
Felix Fietkau44f1d262011-08-28 00:32:25 +02001786/*
1787 * Assign a descriptor (and a sequence number, if necessary)
1788 * and map the buffer for DMA. Frees the skb on error.
1789 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001790static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001791 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001792 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001793 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301794{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001795 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001796 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001797 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001798 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001799 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001800
1801 bf = ath_tx_get_buffer(sc);
1802 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001803 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001804 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001805 }
Sujithe8324352009-01-16 21:38:42 +05301806
Sujithe8324352009-01-16 21:38:42 +05301807 ATH_TXBUF_RESET(bf);
1808
Felix Fietkaufa05f872011-08-28 00:32:24 +02001809 if (tid) {
1810 seqno = tid->seq_next;
1811 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1812 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1813 bf->bf_state.seqno = seqno;
1814 }
1815
Sujithe8324352009-01-16 21:38:42 +05301816 bf->bf_mpdu = skb;
1817
Ben Greearc1739eb32010-10-14 12:45:29 -07001818 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1819 skb->len, DMA_TO_DEVICE);
1820 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301821 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001822 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001823 ath_err(ath9k_hw_common(sc->sc_ah),
1824 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001825 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001826 goto error;
Sujithe8324352009-01-16 21:38:42 +05301827 }
1828
Felix Fietkau56dc6332011-08-28 00:32:22 +02001829 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001830
1831 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001832
1833error:
1834 dev_kfree_skb_any(skb);
1835 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001836}
1837
1838/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001839static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001840 struct ath_tx_control *txctl)
1841{
Felix Fietkau04caf862010-11-14 15:20:12 +01001842 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1843 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001844 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001845 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001846 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301847
Sujithe8324352009-01-16 21:38:42 +05301848 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301849 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1850 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001851 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1852 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001853 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001854
Felix Fietkau066dae92010-11-07 14:59:39 +01001855 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001856 }
1857
1858 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001859 /*
1860 * Try aggregation if it's a unicast data frame
1861 * and the destination is HT capable.
1862 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001863 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301864 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001865 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1866 if (!bf)
1867 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001868
Felix Fietkau82b873a2010-11-11 03:18:37 +01001869 bf->bf_state.bfs_paprd = txctl->paprd;
1870
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301871 if (txctl->paprd)
1872 bf->bf_state.bfs_paprd_timestamp = jiffies;
1873
Felix Fietkau44f1d262011-08-28 00:32:25 +02001874 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301875 }
1876
Felix Fietkaufa05f872011-08-28 00:32:24 +02001877out:
Sujithe8324352009-01-16 21:38:42 +05301878 spin_unlock_bh(&txctl->txq->axq_lock);
1879}
1880
1881/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001882int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301883 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001884{
Felix Fietkau28d16702010-11-14 15:20:10 +01001885 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1886 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001887 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001888 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001889 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001890 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001891 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001892 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001893 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001894
Ben Greeara9927ba2010-12-06 21:13:49 -08001895 /* NOTE: sta can be NULL according to net/mac80211.h */
1896 if (sta)
1897 txctl->an = (struct ath_node *)sta->drv_priv;
1898
Felix Fietkau04caf862010-11-14 15:20:12 +01001899 if (info->control.hw_key)
1900 frmlen += info->control.hw_key->icv_len;
1901
Felix Fietkau28d16702010-11-14 15:20:10 +01001902 /*
1903 * As a temporary workaround, assign seq# here; this will likely need
1904 * to be cleaned up to work better with Beacon transmission and virtual
1905 * BSSes.
1906 */
1907 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1908 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1909 sc->tx.seq_no += 0x10;
1910 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1911 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1912 }
1913
John W. Linville42cecc32011-09-19 15:42:31 -04001914 /* Add the padding after the header if this is not already done */
1915 padpos = ath9k_cmn_padpos(hdr->frame_control);
1916 padsize = padpos & 3;
1917 if (padsize && skb->len > padpos) {
1918 if (skb_headroom(skb) < padsize)
1919 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001920
John W. Linville42cecc32011-09-19 15:42:31 -04001921 skb_push(skb, padsize);
1922 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001923 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001924 }
1925
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001926 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1927 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1928 !ieee80211_is_data(hdr->frame_control))
1929 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1930
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001931 setup_frame_info(hw, skb, frmlen);
1932
1933 /*
1934 * At this point, the vif, hw_key and sta pointers in the tx control
1935 * info are no longer valid (overwritten by the ath_frame_info data).
1936 */
1937
Felix Fietkau066dae92010-11-07 14:59:39 +01001938 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001939 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001940 if (txq == sc->tx.txq_map[q] &&
1941 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001942 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001943 txq->stopped = 1;
1944 }
1945 spin_unlock_bh(&txq->axq_lock);
1946
Felix Fietkau44f1d262011-08-28 00:32:25 +02001947 ath_tx_start_dma(sc, skb, txctl);
1948 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001949}
1950
Sujithe8324352009-01-16 21:38:42 +05301951/*****************/
1952/* TX Completion */
1953/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001954
Sujithe8324352009-01-16 21:38:42 +05301955static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301956 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001957{
Sujithe8324352009-01-16 21:38:42 +05301958 struct ieee80211_hw *hw = sc->hw;
1959 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001960 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001961 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001962 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301963
Joe Perches226afe62010-12-02 19:12:37 -08001964 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301965
Felix Fietkau55797b12011-09-14 21:24:16 +02001966 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301967 /* Frame was ACKed */
1968 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301969
John W. Linville42cecc32011-09-19 15:42:31 -04001970 padpos = ath9k_cmn_padpos(hdr->frame_control);
1971 padsize = padpos & 3;
1972 if (padsize && skb->len > padpos + padsize) {
1973 /*
1974 * Remove MAC header padding before giving the frame back to
1975 * mac80211.
1976 */
1977 memmove(skb->data + padsize, skb->data, padpos);
1978 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301979 }
1980
Felix Fietkauc8e88682011-11-16 13:08:40 +01001981 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05301982 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001983 ath_dbg(common, ATH_DBG_PS,
1984 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301985 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1986 PS_WAIT_FOR_CAB |
1987 PS_WAIT_FOR_PSPOLL_DATA |
1988 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001989 }
1990
Felix Fietkau7545daf2011-01-24 19:23:16 +01001991 q = skb_get_queue_mapping(skb);
1992 if (txq == sc->tx.txq_map[q]) {
1993 spin_lock_bh(&txq->axq_lock);
1994 if (WARN_ON(--txq->pending_frames < 0))
1995 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001996
Felix Fietkau7545daf2011-01-24 19:23:16 +01001997 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1998 ieee80211_wake_queue(sc->hw, q);
1999 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01002000 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002001 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002002 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002003
2004 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05302005}
2006
2007static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002008 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01002009 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05302010{
2011 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002012 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302013 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302014 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302015
Felix Fietkau55797b12011-09-14 21:24:16 +02002016 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302017 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302018
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002019 if (ts->ts_status & ATH9K_TXERR_FILT)
2020 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2021
Ben Greearc1739eb32010-10-14 12:45:29 -07002022 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002023 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002024
2025 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302026 if (time_after(jiffies,
2027 bf->bf_state.bfs_paprd_timestamp +
2028 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002029 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002030 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002031 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002032 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002033 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302034 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002035 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002036 /* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
2037 * accidentally reference it later.
2038 */
2039 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302040
2041 /*
2042 * Return the list of ath_buf's for this mpdu to the free queue.
2043 */
2044 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2045 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2046 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2047}
2048
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002049static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2050 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002051 int txok)
Sujithc4288392008-11-18 09:09:30 +05302052{
Sujitha22be222009-03-30 15:28:36 +05302053 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302054 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302055 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002056 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002057 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302058 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302059
Sujith95e4acb2009-03-13 08:56:09 +05302060 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002061 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302062
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002063 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302064 WARN_ON(tx_rateindex >= hw->max_rates);
2065
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002066 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002067 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302068
Felix Fietkaub572d032010-11-14 15:20:07 +01002069 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002070 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302071 tx_info->status.ampdu_len = nframes;
2072 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002073
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002074 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002075 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002076 /*
2077 * If an underrun error is seen, treat it as an excessive
2078 * retry only if the max frame trigger level has been reached
2079 * (2 KB for single stream, and 4 KB for dual stream).
2080 * Adjust the long retry count as if the frame was tried
2081 * hw->max_rate_tries times, to affect how rate control updates
2082 * PER for the failed rate.
2083 * In case of congestion on the bus, penalizing these
2084 * underruns should help the hardware actually transmit new frames
2085 * successfully by eventually preferring slower rates.
2086 * This in itself should also alleviate congestion on the bus.
2087 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002088 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2089 ATH9K_TX_DELIM_UNDERRUN)) &&
2090 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002091 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002092 tx_info->status.rates[tx_rateindex].count =
2093 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302094 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302095
Felix Fietkau545750d2009-11-23 22:21:01 +01002096 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302097 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002098 tx_info->status.rates[i].idx = -1;
2099 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302100
Felix Fietkau78c46532010-06-25 01:26:16 +02002101 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302102}
2103
Felix Fietkaufce041b2011-05-19 12:20:25 +02002104static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2105 struct ath_tx_status *ts, struct ath_buf *bf,
2106 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302107 __releases(txq->axq_lock)
2108 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002109{
2110 int txok;
2111
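	/*
	 * Called with axq_lock held; the lock is dropped while the completed
	 * frame is reported back and re-acquired before scheduling further
	 * aggregates on this queue.
	 */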
2112 txq->axq_depth--;
2113 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2114 txq->axq_tx_inprogress = false;
2115 if (bf_is_ampdu_not_probing(bf))
2116 txq->axq_ampdu_depth--;
2117
2118 spin_unlock_bh(&txq->axq_lock);
2119
2120 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002121 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002122 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002123 } else
2124 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2125
2126 spin_lock_bh(&txq->axq_lock);
2127
2128 if (sc->sc_flags & SC_OP_TXAGGR)
2129 ath_txq_schedule(sc, txq);
2130}
2131
Sujithc4288392008-11-18 09:09:30 +05302132static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133{
Sujithcbe61d82009-02-09 13:27:12 +05302134 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002135 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2137 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302138 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002139 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 int status;
2141
Joe Perches226afe62010-12-02 19:12:37 -08002142 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2143 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2144 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145
Felix Fietkaufce041b2011-05-19 12:20:25 +02002146 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002148 if (work_pending(&sc->hw_reset_work))
2149 break;
2150
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 if (list_empty(&txq->axq_q)) {
2152 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002153 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002154 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 break;
2156 }
2157 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2158
2159 /*
2160 * There is a race condition in which a BH gets scheduled
2161 * after sw writes TxE and before hw re-loads the last
2162 * descriptor to get the newly chained one.
2163 * Software must keep the last DONE descriptor as a
2164 * holding descriptor - software does so by marking
2165 * it with the STALE flag.
2166 */
2167 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302168 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002172
2173 bf = list_entry(bf_held->list.next, struct ath_buf,
2174 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002175 }
2176
2177 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302178 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179
Felix Fietkau29bffa92010-03-29 20:14:23 -07002180 memset(&ts, 0, sizeof(ts));
2181 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002182 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002184
Ben Greear2dac4fb2011-01-09 23:11:45 -08002185 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186
2187 /*
2188 * Remove the ath_buf's of the same transmit unit from txq;
2189 * however, leave the last descriptor behind as the holding
2190 * descriptor for hw.
2191 */
Sujitha119cc42009-03-30 15:28:38 +05302192 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002193 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194 if (!list_is_singular(&lastbf->list))
2195 list_cut_position(&bf_head,
2196 &txq->axq_q, lastbf->list.prev);
2197
Felix Fietkaufce041b2011-05-19 12:20:25 +02002198 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002199 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002200 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002201 }
Johannes Berge6a98542008-10-21 12:40:02 +02002202
Felix Fietkaufce041b2011-05-19 12:20:25 +02002203 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002204 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002205 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002206}
2207
Sujith305fe472009-07-23 15:32:29 +05302208static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002209{
2210 struct ath_softc *sc = container_of(work, struct ath_softc,
2211 tx_complete_work.work);
2212 struct ath_txq *txq;
2213 int i;
2214 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002215#ifdef CONFIG_ATH9K_DEBUGFS
2216 sc->tx_complete_poll_work_seen++;
2217#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002218
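	/*
	 * Watchdog: a queue that still had the same frames in flight on the
	 * previous poll has made no progress, so treat the hardware as hung
	 * and schedule a chip reset.
	 */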
2219 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2220 if (ATH_TXQ_SETUP(sc, i)) {
2221 txq = &sc->tx.txq[i];
2222 spin_lock_bh(&txq->axq_lock);
2223 if (txq->axq_depth) {
2224 if (txq->axq_tx_inprogress) {
2225 needreset = true;
2226 spin_unlock_bh(&txq->axq_lock);
2227 break;
2228 } else {
2229 txq->axq_tx_inprogress = true;
2230 }
2231 }
2232 spin_unlock_bh(&txq->axq_lock);
2233 }
2234
2235 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002236 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2237 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002238 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002239 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002240 }
2241
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002242 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002243 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2244}
2245
2246
Sujithe8324352009-01-16 21:38:42 +05302247
2248void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002249{
Sujithe8324352009-01-16 21:38:42 +05302250 int i;
2251 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002252
Sujithe8324352009-01-16 21:38:42 +05302253 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002254
2255 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302256 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2257 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002258 }
2259}
2260
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261void ath_tx_edma_tasklet(struct ath_softc *sc)
2262{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2265 struct ath_hw *ah = sc->sc_ah;
2266 struct ath_txq *txq;
2267 struct ath_buf *bf, *lastbf;
2268 struct list_head bf_head;
2269 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002270
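	/*
	 * Drain the EDMA TX status ring; each status entry carries the
	 * hardware queue id (ts.qid), so the completed frame is taken from
	 * the head of that queue's txq_fifo.
	 */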
2271 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002272 if (work_pending(&sc->hw_reset_work))
2273 break;
2274
Felix Fietkaufce041b2011-05-19 12:20:25 +02002275 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002276 if (status == -EINPROGRESS)
2277 break;
2278 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002279 ath_dbg(common, ATH_DBG_XMIT,
2280 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002281 break;
2282 }
2283
2284 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002285 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002286 continue;
2287
Felix Fietkaufce041b2011-05-19 12:20:25 +02002288 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002289
2290 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002291
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002292 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2293 spin_unlock_bh(&txq->axq_lock);
2294 return;
2295 }
2296
2297 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2298 struct ath_buf, list);
2299 lastbf = bf->bf_lastbf;
2300
2301 INIT_LIST_HEAD(&bf_head);
2302 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2303 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002304
Felix Fietkaufce041b2011-05-19 12:20:25 +02002305 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2306 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002307
Felix Fietkaufce041b2011-05-19 12:20:25 +02002308 if (!list_empty(&txq->axq_q)) {
2309 struct list_head bf_q;
2310
2311 INIT_LIST_HEAD(&bf_q);
2312 txq->axq_link = NULL;
2313 list_splice_tail_init(&txq->axq_q, &bf_q);
2314 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2315 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002316 }
2317
Felix Fietkaufce041b2011-05-19 12:20:25 +02002318 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002319 spin_unlock_bh(&txq->axq_lock);
2320 }
2321}
2322
Sujithe8324352009-01-16 21:38:42 +05302323/*****************/
2324/* Init, Cleanup */
2325/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326
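/*
 * The EDMA TX status ring is one DMA-coherent buffer of 'size' entries,
 * each caps.txs_len bytes long, which the hardware fills with TX
 * completion status.
 */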
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002327static int ath_txstatus_setup(struct ath_softc *sc, int size)
2328{
2329 struct ath_descdma *dd = &sc->txsdma;
2330 u8 txs_len = sc->sc_ah->caps.txs_len;
2331
2332 dd->dd_desc_len = size * txs_len;
2333 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2334 &dd->dd_desc_paddr, GFP_KERNEL);
2335 if (!dd->dd_desc)
2336 return -ENOMEM;
2337
2338 return 0;
2339}
2340
2341static int ath_tx_edma_init(struct ath_softc *sc)
2342{
2343 int err;
2344
2345 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2346 if (!err)
2347 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2348 sc->txsdma.dd_desc_paddr,
2349 ATH_TXSTATUS_RING_SIZE);
2350
2351 return err;
2352}
2353
2354static void ath_tx_edma_cleanup(struct ath_softc *sc)
2355{
2356 struct ath_descdma *dd = &sc->txsdma;
2357
2358 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2359 dd->dd_desc_paddr);
2360}
2361
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362int ath_tx_init(struct ath_softc *sc, int nbufs)
2363{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002364 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365 int error = 0;
2366
Sujith797fe5cb2009-03-30 15:28:45 +05302367 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002368
Sujith797fe5cb2009-03-30 15:28:45 +05302369 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002370 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302371 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002372 ath_err(common,
2373 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302374 goto err;
2375 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Sujith797fe5cb2009-03-30 15:28:45 +05302377 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002378 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302379 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002380 ath_err(common,
2381 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302382 goto err;
2383 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002385 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2386
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002387 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2388 error = ath_tx_edma_init(sc);
2389 if (error)
2390 goto err;
2391 }
2392
Sujith797fe5cb2009-03-30 15:28:45 +05302393err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394 if (error != 0)
2395 ath_tx_cleanup(sc);
2396
2397 return error;
2398}
2399
Sujith797fe5cb2009-03-30 15:28:45 +05302400void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401{
Sujithb77f4832008-12-07 21:44:03 +05302402 if (sc->beacon.bdma.dd_desc_len != 0)
2403 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404
Sujithb77f4832008-12-07 21:44:03 +05302405 if (sc->tx.txdma.dd_desc_len != 0)
2406 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002407
2408 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2409 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410}
2411
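/*
 * Per-station TX initialisation: reset the aggregation state of every
 * TID and attach each access category to its transmit queue.
 */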
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2413{
Sujithc5170162008-10-29 10:13:59 +05302414 struct ath_atx_tid *tid;
2415 struct ath_atx_ac *ac;
2416 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417
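	/*
	 * Every TID starts out unscheduled and unpaused, with an empty
	 * software queue and a cleared block-ack window.
	 */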
Sujith8ee5afb2008-12-07 21:43:36 +05302418 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302419 tidno < WME_NUM_TID;
2420 tidno++, tid++) {
2421 tid->an = an;
2422 tid->tidno = tidno;
2423 tid->seq_start = tid->seq_next = 0;
2424 tid->baw_size = WME_MAX_BA;
2425 tid->baw_head = tid->baw_tail = 0;
2426 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302427 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302428 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002429 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302430 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302431 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302432 tid->state &= ~AGGR_ADDBA_COMPLETE;
2433 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302434 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435
Sujith8ee5afb2008-12-07 21:43:36 +05302436 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302437 acno < WME_NUM_AC; acno++, ac++) {
2438 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002439 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302440 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441 }
2442}
2443
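/*
 * Per-station TX teardown: pull every TID and its access category off
 * the scheduler lists and drop whatever is still queued in software.
 */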
Sujithb5aa9bf2008-10-29 10:13:31 +05302444void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002445{
Felix Fietkau2b409942010-07-07 19:42:08 +02002446 struct ath_atx_ac *ac;
2447 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002448 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002449 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302450
Felix Fietkau2b409942010-07-07 19:42:08 +02002451 for (tidno = 0, tid = &an->tid[tidno];
2452 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002453
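		/*
		 * Unscheduling and draining the TID is done under the
		 * queue's axq_lock so that it cannot race with the TX
		 * scheduling path.
		 */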
Felix Fietkau2b409942010-07-07 19:42:08 +02002454 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002455 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002456
Felix Fietkau2b409942010-07-07 19:42:08 +02002457 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002458
Felix Fietkau2b409942010-07-07 19:42:08 +02002459 if (tid->sched) {
2460 list_del(&tid->list);
2461 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002462 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002463
2464 if (ac->sched) {
2465 list_del(&ac->list);
 2466 			ac->sched = false;
2467 }
2468
2469 ath_tid_drain(sc, txq, tid);
2470 tid->state &= ~AGGR_ADDBA_COMPLETE;
2471 tid->state &= ~AGGR_CLEANUP;
2472
2473 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002474 }
2475}