/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {   26,   54 },     /*  0: BPSK */
        {   52,  108 },     /*  1: QPSK 1/2 */
        {   78,  162 },     /*  2: QPSK 3/4 */
        {  104,  216 },     /*  3: 16-QAM 1/2 */
        {  156,  324 },     /*  4: 16-QAM 3/4 */
        {  208,  432 },     /*  5: 64-QAM 2/3 */
        {  234,  486 },     /*  6: 64-QAM 3/4 */
        {  260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
                6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
                9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
                12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
        },
        [MCS_HT20_SGI] = {
                3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
                7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
                10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
                14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40] = {
                6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
                13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
                20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
                26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40_SGI] = {
                7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
                14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
                22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
                29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
        }
};

/*********************/
/* Aggregation logic */
/*********************/

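/*
 * Queue a TID for transmission: put it on its access category's TID list
 * and, if the AC is not already scheduled, put the AC on the txq's
 * schedule list. Paused or already-scheduled TIDs are left untouched.
 */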
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
        ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
                           seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

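/*
 * Drain the software queue of a TID: subframes that were already
 * software-retried are completed with failed status and removed from the
 * block-ack window (followed by a BAR); everything else is sent out as a
 * regular, non-aggregated frame.
 */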
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;
        bool sendbar = false;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        sendbar = true;
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
        }

        if (tid->baw_head == tid->baw_tail) {
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;
        }

        if (sendbar)
                ath_send_bar(tid, tid->seq_start);
}

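/*
 * Remove a completed (or dropped) subframe from the block-ack window and
 * slide the window start past any leading sequence numbers that have
 * already been completed.
 */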
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb, int count)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf = fi->bf;
        struct ieee80211_hdr *hdr;
        int prev = fi->retries;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        fi->retries += count;

        if (prev > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

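/*
 * Handle tx completion of an aggregate: walk the subframe chain, complete
 * the subframes covered by the block-ack bitmap, requeue the remaining
 * ones for software retry (within ATH_MAX_SW_RETRIES), and send a BAR for
 * subframes that had to be dropped.
 */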
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
        int i, retries;
        int bar_index = -1;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        retries = ts->ts_longretry + 1;
        for (i = 0; i < ts->ts_rateindex; i++)
                retries += rates[i].count;

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);
        seq_first = tid->seq_start;

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset,
                         * but the AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable the reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else if ((tid->state & AGGR_CLEANUP) || !retry) {
                        /*
                         * cleanup in progress, just fail
                         * the un-acked sub-frames
                         */
                        txfail = 1;
                } else if (flush) {
                        txpending = 1;
                } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                        if (txok || !an->sleeping)
                                ath_tx_set_retry(sc, txq, bf->bf_mpdu,
                                                 retries);

                        txpending = 1;
                } else {
                        txfail = 1;
                        txfail_cnt++;
                        bar_index = max_t(int, bar_index,
                                          ATH_BA_INDEX(seq_first, seqno));
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        ath_tx_update_baw(sc, tid, seqno);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
                                rc_update = false;
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                            bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update tx baw and complete the
                                 * frame with failed status if we
                                 * run out of tx buf.
                                 */
                                if (!tbf) {
                                        ath_tx_update_baw(sc, tid, seqno);

                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0);
                                        bar_index = max_t(int, bar_index,
                                                          ATH_BA_INDEX(seq_first, seqno));
                                        break;
                                }

                                fi->bf = tbf;
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }

        if (bar_index >= 0)
                ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);

                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);

                        if (ts->ts_status & ATH9K_TXERR_FILT)
                                tid->ac->clear_ps_filter = true;
                }
        }

        if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);

        rcu_read_unlock();

        if (needreset) {
                RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
                ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
        }
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        struct ath_mci_profile *mci = &sc->btcoex.mci;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                int modeidx;

                if (!rates[i].count)
                        continue;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                        legacy = 1;
                        break;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        modeidx = MCS_HT40;
                else
                        modeidx = MCS_HT20;

                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        modeidx++;

                frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                max_4ms_framelen = min(max_4ms_framelen, frmlen);
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if the selected rate is a probe rate, avoid
         * aggregation of this packet entirely.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
                aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
        else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         * The hardware can keep up at lower rates, but not higher rates.
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add delimiters when using RTS/CTS with aggregation
         * on non-enterprise AR9003 cards.
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}

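/*
 * Build one aggregate from the TID's software queue: chain buffers until
 * the block-ack window edge, the rate-dependent aggregate size limit or
 * the subframe count limit is reached, adding per-subframe delimiter
 * padding along the way.
 */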
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);

                if (!bf)
                        continue;

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;
                if (!bf_first)
                        bf_first = bf;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

/*
 * rix     - rate index
 * pktlen  - total bytes (delims + data + fcs + pads + pad delims)
 * width   - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        info->rtscts_rate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                info->rtscts_rate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                        is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

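/*
 * Program the hardware descriptors for every buffer in the chain, linking
 * them together and marking the first/middle/last subframes when the
 * chain forms an aggregate.
 */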
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;

        ath_tx_flush_tid(sc, txtid);
        spin_unlock_bh(&txq->axq_lock);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);

                buffered = !skb_queue_empty(&tid->buf_q);

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                spin_unlock_bh(&txq->axq_lock);

                ieee80211_sta_set_buffered(sta, tidno, buffered);
        }
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                spin_lock_bh(&txq->axq_lock);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                spin_unlock_bh(&txq->axq_lock);
        }
}

Sujithe8324352009-01-16 21:38:42 +05301233void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1234{
1235 struct ath_atx_tid *txtid;
1236 struct ath_node *an;
1237
1238 an = (struct ath_node *)sta->drv_priv;
1239
1240 if (sc->sc_flags & SC_OP_TXAGGR) {
1241 txtid = ATH_AN_2_TID(an, tid);
1242 txtid->baw_size =
1243 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1244 txtid->state |= AGGR_ADDBA_COMPLETE;
1245 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1246 ath_tx_resume_tid(sc, txtid);
1247 }
1248}
1249
Sujithe8324352009-01-16 21:38:42 +05301250/********************/
1251/* Queue Management */
1252/********************/
1253
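/*
 * Unschedule every AC and TID attached to this hardware queue and
 * drop their software-queued frames.
 */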
Sujithe8324352009-01-16 21:38:42 +05301254static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1255 struct ath_txq *txq)
1256{
1257 struct ath_atx_ac *ac, *ac_tmp;
1258 struct ath_atx_tid *tid, *tid_tmp;
1259
1260 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1261 list_del(&ac->list);
1262 ac->sched = false;
1263 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1264 list_del(&tid->list);
1265 tid->sched = false;
1266 ath_tid_drain(sc, txq, tid);
1267 }
1268 }
1269}
1270
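/*
 * Allocate a hardware tx queue for the given queue type/subtype,
 * configure its interrupt behaviour and, on first use, initialize the
 * driver-side ath_txq state (lists, lock, FIFO and depth counters).
 */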
1271struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1272{
Sujithcbe61d82009-02-09 13:27:12 +05301273 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301274 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001275 static const int subtype_txq_to_hwq[] = {
1276 [WME_AC_BE] = ATH_TXQ_AC_BE,
1277 [WME_AC_BK] = ATH_TXQ_AC_BK,
1278 [WME_AC_VI] = ATH_TXQ_AC_VI,
1279 [WME_AC_VO] = ATH_TXQ_AC_VO,
1280 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001281 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301282
1283 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001284 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301285 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1286 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1287 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1288 qi.tqi_physCompBuf = 0;
1289
1290 /*
1291 * Enable interrupts only for EOL and DESC conditions.
1292 * We mark tx descriptors to receive a DESC interrupt
 1293 * when a tx queue gets deep; otherwise we wait for the
 1294 * EOL interrupt to reap descriptors. Note that this is done to
1295 * reduce interrupt load and this only defers reaping
1296 * descriptors, never transmitting frames. Aside from
1297 * reducing interrupts this also permits more concurrency.
1298 * The only potential downside is if the tx queue backs
 1299 * up, in which case the top half of the kernel may back up
1300 * due to a lack of tx descriptors.
1301 *
1302 * The UAPSD queue is an exception, since we take a desc-
1303 * based intr on the EOSP frames.
1304 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001305 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1306 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1307 TXQ_FLAG_TXERRINT_ENABLE;
1308 } else {
1309 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1310 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1311 else
1312 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1313 TXQ_FLAG_TXDESCINT_ENABLE;
1314 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001315 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1316 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301317 /*
1318 * NB: don't print a message, this happens
1319 * normally on parts with too few tx queues
1320 */
1321 return NULL;
1322 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001323 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1324 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301325
Ben Greear60f2d1d2011-01-09 23:11:52 -08001326 txq->axq_qnum = axq_qnum;
1327 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301328 txq->axq_link = NULL;
1329 INIT_LIST_HEAD(&txq->axq_q);
1330 INIT_LIST_HEAD(&txq->axq_acq);
1331 spin_lock_init(&txq->axq_lock);
1332 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001333 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001334 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001335 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001336
1337 txq->txq_headidx = txq->txq_tailidx = 0;
1338 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1339 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301340 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001341 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301342}
1343
Sujithe8324352009-01-16 21:38:42 +05301344int ath_txq_update(struct ath_softc *sc, int qnum,
1345 struct ath9k_tx_queue_info *qinfo)
1346{
Sujithcbe61d82009-02-09 13:27:12 +05301347 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301348 int error = 0;
1349 struct ath9k_tx_queue_info qi;
1350
1351 if (qnum == sc->beacon.beaconq) {
1352 /*
1353 * XXX: for beacon queue, we just save the parameter.
1354 * It will be picked up by ath_beaconq_config when
1355 * it's necessary.
1356 */
1357 sc->beacon.beacon_qi = *qinfo;
1358 return 0;
1359 }
1360
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001361 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301362
1363 ath9k_hw_get_txq_props(ah, qnum, &qi);
1364 qi.tqi_aifs = qinfo->tqi_aifs;
1365 qi.tqi_cwmin = qinfo->tqi_cwmin;
1366 qi.tqi_cwmax = qinfo->tqi_cwmax;
1367 qi.tqi_burstTime = qinfo->tqi_burstTime;
1368 qi.tqi_readyTime = qinfo->tqi_readyTime;
1369
1370 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001371 ath_err(ath9k_hw_common(sc->sc_ah),
1372 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301373 error = -EIO;
1374 } else {
1375 ath9k_hw_resettxqueue(ah, qnum);
1376 }
1377
1378 return error;
1379}
1380
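/*
 * Program the CAB (content-after-beacon) queue ready time as a
 * percentage of the beacon interval, clamping the configured
 * percentage to sane bounds first.
 */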
1381int ath_cabq_update(struct ath_softc *sc)
1382{
1383 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001384 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301385 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301386
1387 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1388 /*
1389 * Ensure the readytime % is within the bounds.
1390 */
Sujith17d79042009-02-09 13:27:03 +05301391 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1392 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1393 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1394 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301395
Steve Brown9814f6b2011-02-07 17:10:39 -07001396 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301397 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301398 ath_txq_update(sc, qnum, &qi);
1399
1400 return 0;
1401}
1402
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001403static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1404{
1405 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1406 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1407}
1408
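/*
 * Reap every buffer on the given list: stale holding descriptors are
 * simply returned to the free list, everything else is completed with
 * a flush status.
 */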
Felix Fietkaufce041b2011-05-19 12:20:25 +02001409static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1410 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301411{
1412 struct ath_buf *bf, *lastbf;
1413 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001414 struct ath_tx_status ts;
1415
1416 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001417 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301418 INIT_LIST_HEAD(&bf_head);
1419
Felix Fietkaufce041b2011-05-19 12:20:25 +02001420 while (!list_empty(list)) {
1421 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301422
Felix Fietkaufce041b2011-05-19 12:20:25 +02001423 if (bf->bf_stale) {
1424 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301425
Felix Fietkaufce041b2011-05-19 12:20:25 +02001426 ath_tx_return_buffer(sc, bf);
1427 continue;
Sujithe8324352009-01-16 21:38:42 +05301428 }
1429
1430 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001432
Sujithe8324352009-01-16 21:38:42 +05301433 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001434 if (bf_is_ampdu_not_probing(bf))
1435 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301436
1437 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001438 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1439 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301440 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001441 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001442 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001443}
1444
1445/*
1446 * Drain a given TX queue (could be Beacon or Data)
1447 *
1448 * This assumes output has been stopped and
1449 * we do not need to block ath_tx_tasklet.
1450 */
1451void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1452{
1453 spin_lock_bh(&txq->axq_lock);
1454 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1455 int idx = txq->txq_tailidx;
1456
1457 while (!list_empty(&txq->txq_fifo[idx])) {
1458 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1459 retry_tx);
1460
1461 INCR(idx, ATH_TXFIFO_DEPTH);
1462 }
1463 txq->txq_tailidx = idx;
1464 }
1465
1466 txq->axq_link = NULL;
1467 txq->axq_tx_inprogress = false;
1468 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001469
1470 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001471 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1472 ath_txq_drain_pending_buffers(sc, txq);
1473
1474 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301475}
1476
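/*
 * Abort DMA on all hardware tx queues and drain the driver-side
 * state. Returns true only if every queue actually stopped.
 */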
Felix Fietkau080e1a22010-12-05 20:17:53 +01001477bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301478{
Sujithcbe61d82009-02-09 13:27:12 +05301479 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001480 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301481 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001482 int i;
1483 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301484
1485 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001486 return true;
Sujith043a0402009-01-16 21:38:47 +05301487
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001488 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301489
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001490 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301491 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001492 if (!ATH_TXQ_SETUP(sc, i))
1493 continue;
1494
Felix Fietkau34d25812011-10-07 02:28:12 +02001495 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1496 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301497 }
1498
Felix Fietkau080e1a22010-12-05 20:17:53 +01001499 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001500 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301501
1502 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001503 if (!ATH_TXQ_SETUP(sc, i))
1504 continue;
1505
1506 /*
1507 * The caller will resume queues with ieee80211_wake_queues.
1508 * Mark the queue as not stopped to prevent ath_tx_complete
1509 * from waking the queue too early.
1510 */
1511 txq = &sc->tx.txq[i];
1512 txq->stopped = false;
1513 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301514 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001515
1516 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301517}
1518
Sujithe8324352009-01-16 21:38:42 +05301519void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1520{
1521 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1522 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1523}
1524
Ben Greear7755bad2011-01-18 17:30:00 -08001525/* For each axq_acq entry, for each tid, try to schedule packets
 1526 * for transmission until ampdu_depth has reached the minimum queue depth.
1527 */
Sujithe8324352009-01-16 21:38:42 +05301528void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1529{
Ben Greear7755bad2011-01-18 17:30:00 -08001530 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1531 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301532
Felix Fietkau236de512011-09-03 01:40:25 +02001533 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001534 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301535 return;
1536
1537 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001538 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301539
Ben Greear7755bad2011-01-18 17:30:00 -08001540 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1541 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1542 list_del(&ac->list);
1543 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301544
Ben Greear7755bad2011-01-18 17:30:00 -08001545 while (!list_empty(&ac->tid_q)) {
1546 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1547 list);
1548 list_del(&tid->list);
1549 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301550
Ben Greear7755bad2011-01-18 17:30:00 -08001551 if (tid->paused)
1552 continue;
Sujithe8324352009-01-16 21:38:42 +05301553
Ben Greear7755bad2011-01-18 17:30:00 -08001554 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301555
Ben Greear7755bad2011-01-18 17:30:00 -08001556 /*
1557 * add tid to round-robin queue if more frames
1558 * are pending for the tid
1559 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001560 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001561 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301562
Ben Greear7755bad2011-01-18 17:30:00 -08001563 if (tid == last_tid ||
1564 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1565 break;
Sujithe8324352009-01-16 21:38:42 +05301566 }
Ben Greear7755bad2011-01-18 17:30:00 -08001567
Felix Fietkaub0477012011-12-14 22:08:05 +01001568 if (!list_empty(&ac->tid_q) && !ac->sched) {
1569 ac->sched = true;
1570 list_add_tail(&ac->list, &txq->axq_acq);
Ben Greear7755bad2011-01-18 17:30:00 -08001571 }
1572
1573 if (ac == last_ac ||
1574 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1575 return;
Sujithe8324352009-01-16 21:38:42 +05301576 }
1577}
1578
Sujithe8324352009-01-16 21:38:42 +05301579/***********/
1580/* TX, DMA */
1581/***********/
1582
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584 * Insert a chain of ath_buf (descriptors) on a txq; the descriptors
 1585 * are assumed to be already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586 */
Sujith102e0572008-10-29 10:15:16 +05301587static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001588 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589{
Sujithcbe61d82009-02-09 13:27:12 +05301590 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001591 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001592 struct ath_buf *bf, *bf_last;
1593 bool puttxbuf = false;
1594 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301595
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001596 /*
1597 * Insert the frame on the outbound list and
1598 * pass it on to the hardware.
1599 */
1600
1601 if (list_empty(head))
1602 return;
1603
Felix Fietkaufce041b2011-05-19 12:20:25 +02001604 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001605 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001606 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607
Joe Perches226afe62010-12-02 19:12:37 -08001608 ath_dbg(common, ATH_DBG_QUEUE,
1609 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610
Felix Fietkaufce041b2011-05-19 12:20:25 +02001611 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1612 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001613 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001614 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001615 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001616 list_splice_tail_init(head, &txq->axq_q);
1617
Felix Fietkaufce041b2011-05-19 12:20:25 +02001618 if (txq->axq_link) {
1619 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001620 ath_dbg(common, ATH_DBG_XMIT,
1621 "link[%u] (%p)=%llx (%p)\n",
1622 txq->axq_qnum, txq->axq_link,
1623 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001624 } else if (!edma)
1625 puttxbuf = true;
1626
1627 txq->axq_link = bf_last->bf_desc;
1628 }
1629
1630 if (puttxbuf) {
1631 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1632 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1633 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1634 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1635 }
1636
1637 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001638 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001639 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001641
1642 if (!internal) {
1643 txq->axq_depth++;
1644 if (bf_is_ampdu_not_probing(bf))
1645 txq->axq_ampdu_depth++;
1646 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001647}
1648
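/*
 * Queue an MPDU for aggregate transmission. If it cannot be sent to
 * the hardware immediately it is buffered on the TID's software
 * queue; otherwise it is transmitted as a single-frame A-MPDU and
 * added to the block-ack window.
 */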
Sujithe8324352009-01-16 21:38:42 +05301649static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001650 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301651{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001652 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001653 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001654 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301655
1656 /*
1657 * Do not queue to h/w when any of the following conditions is true:
1658 * - there are pending frames in software queue
1659 * - the TID is currently paused for ADDBA/BAR request
1660 * - seqno is not within block-ack window
1661 * - h/w queue depth exceeds low water mark
1662 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001663 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001664 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001665 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001666 /*
Sujithe8324352009-01-16 21:38:42 +05301667 * Add this frame to software queue for scheduling later
1668 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001669 */
Ben Greearbda8add2011-01-09 23:11:48 -08001670 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001671 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001672 if (!txctl->an || !txctl->an->sleeping)
1673 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301674 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001675 }
1676
Felix Fietkau44f1d262011-08-28 00:32:25 +02001677 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1678 if (!bf)
1679 return;
1680
Felix Fietkau399c6482011-09-14 21:24:17 +02001681 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001682 INIT_LIST_HEAD(&bf_head);
1683 list_add(&bf->list, &bf_head);
1684
Sujithe8324352009-01-16 21:38:42 +05301685 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001686 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301687
1688 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001689 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301690 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001691 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001692 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301693}
1694
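/*
 * Transmit a frame without aggregation: set up (or reuse) its
 * ath_buf, fill the descriptor and hand the single-buffer list to the
 * hardware queue.
 */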
Felix Fietkau82b873a2010-11-11 03:18:37 +01001695static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001696 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001697{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001698 struct ath_frame_info *fi = get_frame_info(skb);
1699 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301700 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001701
Felix Fietkau44f1d262011-08-28 00:32:25 +02001702 bf = fi->bf;
1703 if (!bf)
1704 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1705
1706 if (!bf)
1707 return;
1708
1709 INIT_LIST_HEAD(&bf_head);
1710 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001711 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301712
Sujithd43f30152009-01-16 21:38:53 +05301713 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001714 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001715 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301716 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001717}
1718
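/*
 * Cache per-frame transmit state (key index, key type and frame
 * length) in the skb's ath_frame_info area before it is queued.
 */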
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001719static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1720 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301721{
1722 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001723 struct ieee80211_sta *sta = tx_info->control.sta;
1724 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001725 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001726 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001727 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001728 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301729
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301731
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001732 if (sta)
1733 an = (struct ath_node *) sta->drv_priv;
1734
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001735 memset(fi, 0, sizeof(*fi));
1736 if (hw_key)
1737 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001738 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1739 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001740 else
1741 fi->keyix = ATH9K_TXKEYIX_INVALID;
1742 fi->keytype = keytype;
1743 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301744}
1745
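/*
 * On APM-capable hardware, reduce a full 3-chain mask to two chains
 * for lower rate indices on 5 GHz channels; otherwise the chainmask
 * is returned unchanged.
 */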
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301746u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1747{
1748 struct ath_hw *ah = sc->sc_ah;
1749 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301750 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1751 (curchan->channelFlags & CHANNEL_5GHZ) &&
1752 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301753 return 0x3;
1754 else
1755 return chainmask;
1756}
1757
Felix Fietkau44f1d262011-08-28 00:32:25 +02001758/*
 1759 * Assign a descriptor (and sequence number if necessary)
 1760 * and map the buffer for DMA. Frees the skb on error.
1761 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001762static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001763 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001764 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001765 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301766{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001767 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001768 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001770 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001771 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001772
1773 bf = ath_tx_get_buffer(sc);
1774 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001775 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001776 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001777 }
Sujithe8324352009-01-16 21:38:42 +05301778
Sujithe8324352009-01-16 21:38:42 +05301779 ATH_TXBUF_RESET(bf);
1780
Felix Fietkaufa05f872011-08-28 00:32:24 +02001781 if (tid) {
1782 seqno = tid->seq_next;
1783 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1784 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1785 bf->bf_state.seqno = seqno;
1786 }
1787
Sujithe8324352009-01-16 21:38:42 +05301788 bf->bf_mpdu = skb;
1789
Ben Greearc1739eb32010-10-14 12:45:29 -07001790 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1791 skb->len, DMA_TO_DEVICE);
1792 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301793 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001794 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001795 ath_err(ath9k_hw_common(sc->sc_ah),
1796 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001797 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001798 goto error;
Sujithe8324352009-01-16 21:38:42 +05301799 }
1800
Felix Fietkau56dc6332011-08-28 00:32:22 +02001801 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001802
1803 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001804
1805error:
1806 dev_kfree_skb_any(skb);
1807 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001808}
1809
1810/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001811static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001812 struct ath_tx_control *txctl)
1813{
Felix Fietkau04caf862010-11-14 15:20:12 +01001814 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1815 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001816 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001817 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301819
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301820 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1821 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001822 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1823 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001824 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001825
Felix Fietkau066dae92010-11-07 14:59:39 +01001826 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001827 }
1828
1829 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001830 /*
1831 * Try aggregation if it's a unicast data frame
1832 * and the destination is HT capable.
1833 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001834 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301835 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001836 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1837 if (!bf)
Felix Fietkau3ad29522011-12-14 22:08:07 +01001838 return;
Felix Fietkau04caf862010-11-14 15:20:12 +01001839
Felix Fietkau82b873a2010-11-11 03:18:37 +01001840 bf->bf_state.bfs_paprd = txctl->paprd;
1841
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301842 if (txctl->paprd)
1843 bf->bf_state.bfs_paprd_timestamp = jiffies;
1844
Felix Fietkau44f1d262011-08-28 00:32:25 +02001845 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301846 }
Sujithe8324352009-01-16 21:38:42 +05301847}
1848
1849/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001850int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301851 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001852{
Felix Fietkau28d16702010-11-14 15:20:10 +01001853 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1854 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001855 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001856 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001857 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001858 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001859 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001860 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001861 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001862
Ben Greeara9927ba2010-12-06 21:13:49 -08001863 /* NOTE: sta can be NULL according to net/mac80211.h */
1864 if (sta)
1865 txctl->an = (struct ath_node *)sta->drv_priv;
1866
Felix Fietkau04caf862010-11-14 15:20:12 +01001867 if (info->control.hw_key)
1868 frmlen += info->control.hw_key->icv_len;
1869
Felix Fietkau28d16702010-11-14 15:20:10 +01001870 /*
1871 * As a temporary workaround, assign seq# here; this will likely need
1872 * to be cleaned up to work better with Beacon transmission and virtual
1873 * BSSes.
1874 */
1875 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1876 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1877 sc->tx.seq_no += 0x10;
1878 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1879 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1880 }
1881
John W. Linville42cecc32011-09-19 15:42:31 -04001882 /* Add the padding after the header if this is not already done */
1883 padpos = ath9k_cmn_padpos(hdr->frame_control);
1884 padsize = padpos & 3;
1885 if (padsize && skb->len > padpos) {
1886 if (skb_headroom(skb) < padsize)
1887 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001888
John W. Linville42cecc32011-09-19 15:42:31 -04001889 skb_push(skb, padsize);
1890 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001891 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001892 }
1893
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001894 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1895 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1896 !ieee80211_is_data(hdr->frame_control))
1897 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1898
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001899 setup_frame_info(hw, skb, frmlen);
1900
1901 /*
1902 * At this point, the vif, hw_key and sta pointers in the tx control
 1903 * info are no longer valid (overwritten by the ath_frame_info data).
1904 */
1905
Felix Fietkau066dae92010-11-07 14:59:39 +01001906 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001907 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001908 if (txq == sc->tx.txq_map[q] &&
1909 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001910 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001911 txq->stopped = 1;
1912 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001913
Felix Fietkau44f1d262011-08-28 00:32:25 +02001914 ath_tx_start_dma(sc, skb, txctl);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001915
1916 spin_unlock_bh(&txq->axq_lock);
1917
Felix Fietkau44f1d262011-08-28 00:32:25 +02001918 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001919}
1920
Sujithe8324352009-01-16 21:38:42 +05301921/*****************/
1922/* TX Completion */
1923/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001924
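/*
 * Final completion for a single skb: strip the header padding added
 * on transmit, update powersave state, wake the mac80211 queue if it
 * was stopped and hand the skb back via ieee80211_tx_status().
 */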
Sujithe8324352009-01-16 21:38:42 +05301925static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301926 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001927{
Sujithe8324352009-01-16 21:38:42 +05301928 struct ieee80211_hw *hw = sc->hw;
1929 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001930 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001931 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001932 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301933
Joe Perches226afe62010-12-02 19:12:37 -08001934 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301935
Felix Fietkau55797b12011-09-14 21:24:16 +02001936 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301937 /* Frame was ACKed */
1938 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301939
John W. Linville42cecc32011-09-19 15:42:31 -04001940 padpos = ath9k_cmn_padpos(hdr->frame_control);
1941 padsize = padpos & 3;
 1942 	if (padsize && skb->len > padpos + padsize) {
1943 /*
1944 * Remove MAC header padding before giving the frame back to
1945 * mac80211.
1946 */
1947 memmove(skb->data + padsize, skb->data, padpos);
1948 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301949 }
1950
Felix Fietkauc8e88682011-11-16 13:08:40 +01001951 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05301952 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001953 ath_dbg(common, ATH_DBG_PS,
1954 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301955 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1956 PS_WAIT_FOR_CAB |
1957 PS_WAIT_FOR_PSPOLL_DATA |
1958 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001959 }
1960
Felix Fietkau7545daf2011-01-24 19:23:16 +01001961 q = skb_get_queue_mapping(skb);
1962 if (txq == sc->tx.txq_map[q]) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001965
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001969 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001970 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001971
1972 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301973}
1974
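/*
 * Unmap the frame's DMA buffer, complete PAPRD calibration frames
 * separately, pass ordinary frames to ath_tx_complete() and return
 * the ath_buf chain to the free list.
 */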
1975static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001976 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01001977 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301978{
1979 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001980 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301981 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301982 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301983
Felix Fietkau55797b12011-09-14 21:24:16 +02001984 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301985 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301986
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001987 if (ts->ts_status & ATH9K_TXERR_FILT)
1988 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1989
Ben Greearc1739eb32010-10-14 12:45:29 -07001990 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001991 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001992
1993 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301994 if (time_after(jiffies,
1995 bf->bf_state.bfs_paprd_timestamp +
1996 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001997 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001998 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001999 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002000 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002001 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302002 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002003 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002004	/* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
2005 * accidentally reference it later.
2006 */
2007 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302008
2009 /*
 2010	 * Return the list of ath_buf for this mpdu to the free queue
2011 */
2012 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2013 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2014 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2015}
2016
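/*
 * Translate the hardware tx status into mac80211 rate-control
 * feedback: ACK RSSI, A-MPDU subframe counts and per-rate retry
 * counts.
 */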
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002017static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2018 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002019 int txok)
Sujithc4288392008-11-18 09:09:30 +05302020{
Sujitha22be222009-03-30 15:28:36 +05302021 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302022 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302023 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002024 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002025 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302026 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302027
Sujith95e4acb2009-03-13 08:56:09 +05302028 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002029 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302030
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002031 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302032 WARN_ON(tx_rateindex >= hw->max_rates);
2033
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002034 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002035 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302036
Felix Fietkaub572d032010-11-14 15:20:07 +01002037 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002038 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302039 tx_info->status.ampdu_len = nframes;
2040 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002041
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002042 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002043 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002044 /*
 2045		 * If an underrun error is seen, treat it as an excessive
 2046		 * retry, but only if the max frame trigger level has been
 2047		 * reached (2 KB for single stream, 4 KB for dual stream).
 2048		 * Adjust the long retry count as if the frame had been tried
 2049		 * hw->max_rate_tries times, to affect how rate control updates
 2050		 * PER for the failed rate.
 2051		 * When the bus is congested, penalizing this type of
 2052		 * underrun should help the hardware actually transmit new
 2053		 * frames successfully by eventually preferring slower rates.
 2054		 * This in itself should also alleviate congestion on the bus.
2055 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002056 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2057 ATH9K_TX_DELIM_UNDERRUN)) &&
2058 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002059 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002060 tx_info->status.rates[tx_rateindex].count =
2061 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302062 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302063
Felix Fietkau545750d2009-11-23 22:21:01 +01002064 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302065 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002066 tx_info->status.rates[i].idx = -1;
2067 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302068
Felix Fietkau78c46532010-06-25 01:26:16 +02002069 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302070}
2071
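/*
 * Common completion path for legacy and EDMA queues: update the queue
 * depth counters, dispatch to the aggregate or single-frame
 * completion handler and reschedule the queue.
 */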
Felix Fietkaufce041b2011-05-19 12:20:25 +02002072static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2073 struct ath_tx_status *ts, struct ath_buf *bf,
2074 struct list_head *bf_head)
2075{
2076 int txok;
2077
2078 txq->axq_depth--;
2079 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2080 txq->axq_tx_inprogress = false;
2081 if (bf_is_ampdu_not_probing(bf))
2082 txq->axq_ampdu_depth--;
2083
Felix Fietkaufce041b2011-05-19 12:20:25 +02002084 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002085 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002086 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002087 } else
2088 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2089
Felix Fietkaufce041b2011-05-19 12:20:25 +02002090 if (sc->sc_flags & SC_OP_TXAGGR)
2091 ath_txq_schedule(sc, txq);
2092}
2093
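/*
 * Reap completed descriptors from a legacy (non-EDMA) tx queue,
 * taking care to leave the last descriptor as the holding descriptor
 * for the hardware, and complete each reaped frame.
 */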
Sujithc4288392008-11-18 09:09:30 +05302094static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095{
Sujithcbe61d82009-02-09 13:27:12 +05302096 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002097 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002098 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2099 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302100 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002101 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002102 int status;
2103
Joe Perches226afe62010-12-02 19:12:37 -08002104 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2105 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2106 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002107
Felix Fietkaufce041b2011-05-19 12:20:25 +02002108 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002110 if (work_pending(&sc->hw_reset_work))
2111 break;
2112
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113 if (list_empty(&txq->axq_q)) {
2114 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002115 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002116 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002117 break;
2118 }
2119 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2120
2121 /*
2122 * There is a race condition that a BH gets scheduled
2123 * after sw writes TxE and before hw re-load the last
2124 * descriptor to get the newly chained one.
2125 * Software must keep the last DONE descriptor as a
2126 * holding descriptor - software does so by marking
2127 * it with the STALE flag.
2128 */
2129 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302130 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002132 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002134
2135 bf = list_entry(bf_held->list.next, struct ath_buf,
2136 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002137 }
2138
2139 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302140 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141
Felix Fietkau29bffa92010-03-29 20:14:23 -07002142 memset(&ts, 0, sizeof(ts));
2143 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002144 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002146
Ben Greear2dac4fb2011-01-09 23:11:45 -08002147 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148
2149 /*
2150 * Remove ath_buf's of the same transmit unit from txq,
2151 * however leave the last descriptor back as the holding
2152 * descriptor for hw.
2153 */
Sujitha119cc42009-03-30 15:28:38 +05302154 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 if (!list_is_singular(&lastbf->list))
2157 list_cut_position(&bf_head,
2158 &txq->axq_q, lastbf->list.prev);
2159
Felix Fietkaufce041b2011-05-19 12:20:25 +02002160 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002161 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002162 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 }
Johannes Berge6a98542008-10-21 12:40:02 +02002164
Felix Fietkaufce041b2011-05-19 12:20:25 +02002165 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002167 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168}
2169
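/*
 * Watchdog for stuck tx queues: if a queue still has frames pending
 * and has made no progress since the last poll, schedule a chip reset.
 */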
Sujith305fe472009-07-23 15:32:29 +05302170static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002171{
2172 struct ath_softc *sc = container_of(work, struct ath_softc,
2173 tx_complete_work.work);
2174 struct ath_txq *txq;
2175 int i;
2176 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002177#ifdef CONFIG_ATH9K_DEBUGFS
2178 sc->tx_complete_poll_work_seen++;
2179#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002180
2181 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2182 if (ATH_TXQ_SETUP(sc, i)) {
2183 txq = &sc->tx.txq[i];
2184 spin_lock_bh(&txq->axq_lock);
2185 if (txq->axq_depth) {
2186 if (txq->axq_tx_inprogress) {
2187 needreset = true;
2188 spin_unlock_bh(&txq->axq_lock);
2189 break;
2190 } else {
2191 txq->axq_tx_inprogress = true;
2192 }
2193 }
2194 spin_unlock_bh(&txq->axq_lock);
2195 }
2196
2197 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002198 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2199 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002200 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002201 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002202 }
2203
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002204 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002205 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2206}
2207
2208
Sujithe8324352009-01-16 21:38:42 +05302209
2210void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002211{
Sujithe8324352009-01-16 21:38:42 +05302212 int i;
2213 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002214
Sujithe8324352009-01-16 21:38:42 +05302215 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002216
2217 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302218 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2219 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002220 }
2221}
2222
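/*
 * Completion handler for EDMA-capable hardware: pop entries from the
 * tx status ring, complete the matching frames from the per-queue
 * FIFO, and refill the FIFO from axq_q when a slot drains.
 */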
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002223void ath_tx_edma_tasklet(struct ath_softc *sc)
2224{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002225 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002226 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2227 struct ath_hw *ah = sc->sc_ah;
2228 struct ath_txq *txq;
2229 struct ath_buf *bf, *lastbf;
2230 struct list_head bf_head;
2231 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002232
2233 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002234 if (work_pending(&sc->hw_reset_work))
2235 break;
2236
Felix Fietkaufce041b2011-05-19 12:20:25 +02002237 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002238 if (status == -EINPROGRESS)
2239 break;
2240 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002241 ath_dbg(common, ATH_DBG_XMIT,
2242 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002243 break;
2244 }
2245
2246 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002247 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248 continue;
2249
Felix Fietkaufce041b2011-05-19 12:20:25 +02002250 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251
2252 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002253
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2255 spin_unlock_bh(&txq->axq_lock);
2256 return;
2257 }
2258
2259 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2260 struct ath_buf, list);
2261 lastbf = bf->bf_lastbf;
2262
2263 INIT_LIST_HEAD(&bf_head);
2264 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2265 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002266
Felix Fietkaufce041b2011-05-19 12:20:25 +02002267 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2268 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002269
Felix Fietkaufce041b2011-05-19 12:20:25 +02002270 if (!list_empty(&txq->axq_q)) {
2271 struct list_head bf_q;
2272
2273 INIT_LIST_HEAD(&bf_q);
2274 txq->axq_link = NULL;
2275 list_splice_tail_init(&txq->axq_q, &bf_q);
2276 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2277 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002278 }
2279
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002281 spin_unlock_bh(&txq->axq_lock);
2282 }
2283}
2284
Sujithe8324352009-01-16 21:38:42 +05302285/*****************/
2286/* Init, Cleanup */
2287/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002288
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002289static int ath_txstatus_setup(struct ath_softc *sc, int size)
2290{
2291 struct ath_descdma *dd = &sc->txsdma;
2292 u8 txs_len = sc->sc_ah->caps.txs_len;
2293
2294 dd->dd_desc_len = size * txs_len;
2295 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2296 &dd->dd_desc_paddr, GFP_KERNEL);
2297 if (!dd->dd_desc)
2298 return -ENOMEM;
2299
2300 return 0;
2301}
2302
2303static int ath_tx_edma_init(struct ath_softc *sc)
2304{
2305 int err;
2306
2307 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2308 if (!err)
2309 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2310 sc->txsdma.dd_desc_paddr,
2311 ATH_TXSTATUS_RING_SIZE);
2312
2313 return err;
2314}
2315
2316static void ath_tx_edma_cleanup(struct ath_softc *sc)
2317{
2318 struct ath_descdma *dd = &sc->txsdma;
2319
2320 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2321 dd->dd_desc_paddr);
2322}
2323
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002324int ath_tx_init(struct ath_softc *sc, int nbufs)
2325{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002326 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002327 int error = 0;
2328
Sujith797fe5cb2009-03-30 15:28:45 +05302329 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002330
Sujith797fe5cb2009-03-30 15:28:45 +05302331 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002332 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302333 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002334 ath_err(common,
2335 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302336 goto err;
2337 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002338
Sujith797fe5cb2009-03-30 15:28:45 +05302339 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002340 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302341 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002342 ath_err(common,
2343 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302344 goto err;
2345 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002346
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002347 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2348
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002349 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2350 error = ath_tx_edma_init(sc);
2351 if (error)
2352 goto err;
2353 }
2354
Sujith797fe5cb2009-03-30 15:28:45 +05302355err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356 if (error != 0)
2357 ath_tx_cleanup(sc);
2358
2359 return error;
2360}
2361
Sujith797fe5cb2009-03-30 15:28:45 +05302362void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363{
Sujithb77f4832008-12-07 21:44:03 +05302364 if (sc->beacon.bdma.dd_desc_len != 0)
2365 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujithb77f4832008-12-07 21:44:03 +05302367 if (sc->tx.txdma.dd_desc_len != 0)
2368 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002369
2370 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2371 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372}
2373
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2375{
Sujithc5170162008-10-29 10:13:59 +05302376 struct ath_atx_tid *tid;
2377 struct ath_atx_ac *ac;
2378 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujith8ee5afb2008-12-07 21:43:36 +05302380 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302381 tidno < WME_NUM_TID;
2382 tidno++, tid++) {
2383 tid->an = an;
2384 tid->tidno = tidno;
2385 tid->seq_start = tid->seq_next = 0;
2386 tid->baw_size = WME_MAX_BA;
2387 tid->baw_head = tid->baw_tail = 0;
2388 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302389 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302390 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002391 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302392 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302393 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302394 tid->state &= ~AGGR_ADDBA_COMPLETE;
2395 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302396 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397
Sujith8ee5afb2008-12-07 21:43:36 +05302398 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302399 acno < WME_NUM_AC; acno++, ac++) {
2400 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002401 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302402 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403 }
2404}
2405
Sujithb5aa9bf2008-10-29 10:13:31 +05302406void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407{
Felix Fietkau2b409942010-07-07 19:42:08 +02002408 struct ath_atx_ac *ac;
2409 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002411 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302412
Felix Fietkau2b409942010-07-07 19:42:08 +02002413 for (tidno = 0, tid = &an->tid[tidno];
2414 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415
Felix Fietkau2b409942010-07-07 19:42:08 +02002416 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002417 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418
Felix Fietkau2b409942010-07-07 19:42:08 +02002419 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 if (tid->sched) {
2422 list_del(&tid->list);
2423 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002425
2426 if (ac->sched) {
2427 list_del(&ac->list);
2428 tid->ac->sched = false;
2429 }
2430
2431 ath_tid_drain(sc, txq, tid);
2432 tid->state &= ~AGGR_ADDBA_COMPLETE;
2433 tid->state &= ~AGGR_CLEANUP;
2434
2435 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436 }
2437}