blob: b1a37d28deee75e2e21da5c76b80823cd563010e [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujith Manoharan5b681382011-05-17 13:36:18 +05302 * Copyright (c) 2008-2011 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Alexey Dobriyanb7f080c2011-06-16 11:01:34 +000017#include <linux/dma-mapping.h>
Sujith394cf0a2009-02-09 13:26:54 +053018#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040019#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070020
21#define BITS_PER_BYTE 8
22#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070023#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8
25#define L_LTF 8
26#define L_SIG 4
27#define HT_SIG 8
28#define HT_STF 4
29#define HT_LTF(_ns) (4 * (_ns))
30#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
31#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070035
/*
 * Data bits carried per OFDM symbol for HT MCS 0-7, indexed as
 * [mcs][0: 20 MHz, 1: 40 MHz]. Used for symbol/duration math.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};
47
48#define IS_HT_RATE(_rate) ((_rate) & 0x80)
49
Felix Fietkau82b873a2010-11-11 03:18:37 +010050static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +020051 struct ath_atx_tid *tid, struct sk_buff *skb);
52static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
53 int tx_flags, struct ath_txq *txq);
Sujithe8324352009-01-16 21:38:42 +053054static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070055 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +010056 struct ath_tx_status *ts, int txok);
Sujithe8324352009-01-16 21:38:42 +053057static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +020058 struct list_head *head, bool internal);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010059static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
60 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +020061 int txok);
Felix Fietkau90fa5392010-09-20 13:45:38 +020062static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno);
Felix Fietkau44f1d262011-08-28 00:32:25 +020064static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
65 struct ath_txq *txq,
66 struct ath_atx_tid *tid,
67 struct sk_buff *skb);
Sujithe8324352009-01-16 21:38:42 +053068
/* Row indices into ath_max_4ms_framelen[]: HT bandwidth/guard-interval mode. */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
75
/*
 * Largest frame length (bytes) that still fits a 4 ms transmit duration,
 * indexed by [mode][MCS 0-31]. Entries cap at 65532 — aggregates are
 * limited to 16-bit lengths in hardware (see ath_lookup_rate()).
 */
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
102
Sujithe8324352009-01-16 21:38:42 +0530103/*********************/
104/* Aggregation logic */
105/*********************/
106
Sujithe8324352009-01-16 21:38:42 +0530107static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
108{
109 struct ath_atx_ac *ac = tid->ac;
110
111 if (tid->paused)
112 return;
113
114 if (tid->sched)
115 return;
116
117 tid->sched = true;
118 list_add_tail(&tid->list, &ac->tid_q);
119
120 if (ac->sched)
121 return;
122
123 ac->sched = true;
124 list_add_tail(&ac->list, &txq->axq_acq);
125}
126
Sujithe8324352009-01-16 21:38:42 +0530127static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
128{
Felix Fietkau066dae92010-11-07 14:59:39 +0100129 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530130
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200131 WARN_ON(!tid->paused);
132
Sujithe8324352009-01-16 21:38:42 +0530133 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200134 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530135
Felix Fietkau56dc6332011-08-28 00:32:22 +0200136 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +0530137 goto unlock;
138
139 ath_tx_queue_tid(txq, tid);
140 ath_txq_schedule(sc, txq);
141unlock:
142 spin_unlock_bh(&txq->axq_lock);
143}
144
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100145static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100146{
147 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100148 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
149 sizeof(tx_info->rate_driver_data));
150 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100151}
152
Felix Fietkau156369f2011-12-14 22:08:04 +0100153static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
154{
155 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
156 seqno << IEEE80211_SEQ_SEQ_SHIFT);
157}
158
/*
 * Flush the software queue of a TID: subframes that were already added
 * to the block-ack window (fi->retries set) are removed from the window
 * and completed as failed; the rest are transmitted as plain, non-
 * aggregate frames. If the window was touched, a BAR is sent so the
 * receiver moves past the dropped sequence numbers. Once the window is
 * empty, pending ADDBA/cleanup state is reset.
 *
 * NOTE(review): the queue lock is dropped around the completion/send
 * calls inside the loop, so the buf_q may be refilled concurrently.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			/* Frame was in the BAW: pull it out and fail it. */
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			/* Never aggregated: send it as a normal frame. */
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	/* Window fully drained: clear aggregation session state. */
	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	spin_unlock_bh(&txq->axq_lock);

	if (sendbar)
		ath_send_bar(tid, tid->seq_start);
}
200
201static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
202 int seqno)
203{
204 int index, cindex;
205
206 index = ATH_BA_INDEX(tid->seq_start, seqno);
207 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
208
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200209 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530210
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200211 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530212 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
213 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
214 }
215}
216
217static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100218 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530219{
220 int index, cindex;
221
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100222 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530223 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200224 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530225
226 if (index >= ((tid->baw_tail - tid->baw_head) &
227 (ATH_TID_MAX_BUFS - 1))) {
228 tid->baw_tail = cindex;
229 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
230 }
231}
232
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
/*
 * Drop every frame queued on a TID, completing each with error status
 * and clearing the block-ack window.
 *
 * NOTE(review): uses spin_unlock()/spin_lock() (not _bh) around the
 * completion calls — the caller apparently holds txq->axq_lock with BH
 * already disabled; confirm against call sites.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* No descriptor attached yet: complete the bare skb. */
		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		/* Retried frames are in the BAW; remove them from it. */
		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
		spin_lock(&txq->axq_lock);
	}

	/* Reset the window to an empty state. */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
276
Sujithfec247c2009-07-27 12:08:16 +0530277static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkauda647622011-12-14 22:08:03 +0100278 struct sk_buff *skb, int count)
Sujithe8324352009-01-16 21:38:42 +0530279{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100280 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkauf11cc942011-09-15 12:59:49 +0200281 struct ath_buf *bf = fi->bf;
Sujithe8324352009-01-16 21:38:42 +0530282 struct ieee80211_hdr *hdr;
Felix Fietkauda647622011-12-14 22:08:03 +0100283 int prev = fi->retries;
Sujithe8324352009-01-16 21:38:42 +0530284
Sujithfec247c2009-07-27 12:08:16 +0530285 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkauda647622011-12-14 22:08:03 +0100286 fi->retries += count;
287
288 if (prev > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100289 return;
Sujithe8324352009-01-16 21:38:42 +0530290
Sujithe8324352009-01-16 21:38:42 +0530291 hdr = (struct ieee80211_hdr *)skb->data;
292 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
Felix Fietkauf11cc942011-09-15 12:59:49 +0200293 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
294 sizeof(*hdr), DMA_TO_DEVICE);
Sujithe8324352009-01-16 21:38:42 +0530295}
296
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200297static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
298{
299 struct ath_buf *bf = NULL;
300
301 spin_lock_bh(&sc->tx.txbuflock);
302
303 if (unlikely(list_empty(&sc->tx.txbuf))) {
304 spin_unlock_bh(&sc->tx.txbuflock);
305 return NULL;
306 }
307
308 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
309 list_del(&bf->list);
310
311 spin_unlock_bh(&sc->tx.txbuflock);
312
313 return bf;
314}
315
/* Return a tx buffer to the free pool (counterpart of ath_tx_get_buffer). */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}
322
Sujithd43f30152009-01-16 21:38:53 +0530323static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
324{
325 struct ath_buf *tbf;
326
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200327 tbf = ath_tx_get_buffer(sc);
328 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530329 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530330
331 ATH_TXBUF_RESET(tbf);
332
333 tbf->bf_mpdu = bf->bf_mpdu;
334 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400335 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530336 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530337
338 return tbf;
339}
340
Felix Fietkaub572d032010-11-14 15:20:07 +0100341static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
342 struct ath_tx_status *ts, int txok,
343 int *nframes, int *nbad)
344{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100345 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100346 u16 seq_st = 0;
347 u32 ba[WME_BA_BMP_SIZE >> 5];
348 int ba_index;
349 int isaggr = 0;
350
351 *nbad = 0;
352 *nframes = 0;
353
Felix Fietkaub572d032010-11-14 15:20:07 +0100354 isaggr = bf_isaggr(bf);
355 if (isaggr) {
356 seq_st = ts->ts_seqnum;
357 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
358 }
359
360 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100361 fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau6a0ddae2011-08-28 00:32:23 +0200362 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100363
364 (*nframes)++;
365 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
366 (*nbad)++;
367
368 bf = bf->bf_next;
369 }
370}
371
372
/*
 * Process the tx status of an aggregate (or would-be aggregate) frame:
 * walk every subframe, complete the ones covered by the block-ack
 * bitmap, software-retry the rest up to ATH_MAX_SW_RETRIES, and send a
 * BAR to move the receiver's window past subframes dropped for good.
 * Un-acked subframes that are retried are spliced back to the front of
 * the TID's software queue to retain ordering.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	/* Snapshot the rate series before completion paths clobber it. */
	memcpy(rates, tx_info->control.rates, sizeof(rates));

	/* Total hardware tries: long retries of the final rate plus one
	 * attempt per count of every earlier rate in the series. */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* Station went away: fail every subframe in the chain. */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			/* Queue flush in progress: keep frame pending. */
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			/* Don't count retries against a sleeping peer
			 * unless the hardware reported success. */
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* Retry budget exhausted: drop and note the
			 * highest dropped index for the BAR below. */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* Feed rate control exactly once per aggregate. */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					spin_lock_bh(&txq->axq_lock);
					ath_tx_update_baw(sc, tid, seqno);
					spin_unlock_bh(&txq->axq_lock);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	if (bar_index >= 0)
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}
596
Rajkumar Manoharan1a6e9d02011-08-23 12:32:57 +0530597static bool ath_lookup_legacy(struct ath_buf *bf)
598{
599 struct sk_buff *skb;
600 struct ieee80211_tx_info *tx_info;
601 struct ieee80211_tx_rate *rates;
602 int i;
603
604 skb = bf->bf_mpdu;
605 tx_info = IEEE80211_SKB_CB(skb);
606 rates = tx_info->control.rates;
607
Felix Fietkau059ee092011-08-27 10:25:27 +0200608 for (i = 0; i < 4; i++) {
609 if (!rates[i].count || rates[i].idx < 0)
610 break;
611
Rajkumar Manoharan1a6e9d02011-08-23 12:32:57 +0530612 if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
613 return true;
614 }
615
616 return false;
617}
618
/*
 * Compute the aggregate size limit (bytes) for a frame on this TID:
 * take the smallest 4 ms frame length across the frame's rate series,
 * then clamp by BT-coex constraints and the peer's advertised maximum
 * A-MPDU size. Returns 0 when aggregation should be avoided entirely
 * (probe rate in use, or a legacy rate in the series).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* Map bandwidth + guard interval to a table row. */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	/* Shrink the limit when Bluetooth coexistence is active. */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
691
692/*
Sujithd43f30152009-01-16 21:38:53 +0530693 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530694 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530695 */
696static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530697 struct ath_buf *bf, u16 frmlen,
698 bool first_subfrm)
Sujithe8324352009-01-16 21:38:42 +0530699{
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530700#define FIRST_DESC_NDELIMS 60
Sujithe8324352009-01-16 21:38:42 +0530701 struct sk_buff *skb = bf->bf_mpdu;
702 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530703 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530704 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100705 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200706 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100707 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530708
709 /* Select standard number of delimiters based on frame length alone */
710 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
711
712 /*
713 * If encryption enabled, hardware requires some more padding between
714 * subframes.
715 * TODO - this could be improved to be dependent on the rate.
716 * The hardware can keep up at lower rates, but not higher rates
717 */
Rajkumar Manoharan4f6760b2011-07-01 18:37:33 +0530718 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
719 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
Sujithe8324352009-01-16 21:38:42 +0530720 ndelim += ATH_AGGR_ENCRYPTDELIM;
721
722 /*
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530723 * Add delimiter when using RTS/CTS with aggregation
724 * and non enterprise AR9003 card
725 */
Felix Fietkau34597312011-08-29 18:57:54 +0200726 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
727 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530728 ndelim = max(ndelim, FIRST_DESC_NDELIMS);
729
730 /*
Sujithe8324352009-01-16 21:38:42 +0530731 * Convert desired mpdu density from microeconds to bytes based
732 * on highest rate in rate series (i.e. first rate) to determine
733 * required minimum length for subframe. Take into account
734 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530735 *
Sujithe8324352009-01-16 21:38:42 +0530736 * If there is no mpdu density restriction, no further calculation
737 * is needed.
738 */
Sujith4ef70842009-07-23 15:32:41 +0530739
740 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530741 return ndelim;
742
743 rix = tx_info->control.rates[0].idx;
744 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530745 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
746 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
747
748 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530749 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530750 else
Sujith4ef70842009-07-23 15:32:41 +0530751 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530752
753 if (nsymbols == 0)
754 nsymbols = 1;
755
Felix Fietkauc6663872010-04-19 19:57:33 +0200756 streams = HT_RC_2_STREAMS(rix);
757 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530758 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
759
Sujithe8324352009-01-16 21:38:42 +0530760 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530761 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
762 ndelim = max(mindelim, ndelim);
763 }
764
765 return ndelim;
766}
767
768static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530769 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530770 struct ath_atx_tid *tid,
Felix Fietkau269c44b2010-11-14 15:20:06 +0100771 struct list_head *bf_q,
772 int *aggr_len)
Sujithe8324352009-01-16 21:38:42 +0530773{
774#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Felix Fietkau56dc6332011-08-28 00:32:22 +0200775 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530776 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530777 u16 aggr_limit = 0, al = 0, bpad = 0,
778 al_delta, h_baw = tid->baw_size / 2;
779 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Felix Fietkau0299a502010-10-21 02:47:24 +0200780 struct ieee80211_tx_info *tx_info;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100781 struct ath_frame_info *fi;
Felix Fietkau56dc6332011-08-28 00:32:22 +0200782 struct sk_buff *skb;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +0200783 u16 seqno;
Sujithe8324352009-01-16 21:38:42 +0530784
785 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +0200786 skb = skb_peek(&tid->buf_q);
787 fi = get_frame_info(skb);
788 bf = fi->bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +0200789 if (!fi->bf)
790 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
Felix Fietkau56dc6332011-08-28 00:32:22 +0200791
Felix Fietkau44f1d262011-08-28 00:32:25 +0200792 if (!bf)
793 continue;
794
Felix Fietkau399c6482011-09-14 21:24:17 +0200795 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
Felix Fietkau44f1d262011-08-28 00:32:25 +0200796 seqno = bf->bf_state.seqno;
Felix Fietkau56dc6332011-08-28 00:32:22 +0200797 if (!bf_first)
798 bf_first = bf;
Sujithe8324352009-01-16 21:38:42 +0530799
Sujithd43f30152009-01-16 21:38:53 +0530800 /* do not step over block-ack window */
Felix Fietkau6a0ddae2011-08-28 00:32:23 +0200801 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
Sujithe8324352009-01-16 21:38:42 +0530802 status = ATH_AGGR_BAW_CLOSED;
803 break;
804 }
805
806 if (!rl) {
807 aggr_limit = ath_lookup_rate(sc, bf, tid);
808 rl = 1;
809 }
810
Sujithd43f30152009-01-16 21:38:53 +0530811 /* do not exceed aggregation limit */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100812 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
Sujithe8324352009-01-16 21:38:42 +0530813
Sujithd43f30152009-01-16 21:38:53 +0530814 if (nframes &&
Rajkumar Manoharan1a6e9d02011-08-23 12:32:57 +0530815 ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
816 ath_lookup_legacy(bf))) {
Sujithe8324352009-01-16 21:38:42 +0530817 status = ATH_AGGR_LIMITED;
818 break;
819 }
820
Felix Fietkau0299a502010-10-21 02:47:24 +0200821 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Felix Fietkaubdf2dbf2011-09-14 21:24:25 +0200822 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
Felix Fietkau0299a502010-10-21 02:47:24 +0200823 break;
824
Sujithd43f30152009-01-16 21:38:53 +0530825 /* do not exceed subframe limit */
826 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530827 status = ATH_AGGR_LIMITED;
828 break;
829 }
830
Sujithd43f30152009-01-16 21:38:53 +0530831 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530832 al += bpad + al_delta;
833
834 /*
835 * Get the delimiters needed to meet the MPDU
836 * density for this node.
837 */
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530838 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
839 !nframes);
Sujithe8324352009-01-16 21:38:42 +0530840 bpad = PADBYTES(al_delta) + (ndelim << 2);
841
Rajkumar Manoharan7a12dfd2011-08-13 10:28:08 +0530842 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530843 bf->bf_next = NULL;
Sujithe8324352009-01-16 21:38:42 +0530844
Sujithd43f30152009-01-16 21:38:53 +0530845 /* link buffers of this frame to the aggregate */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100846 if (!fi->retries)
Felix Fietkau6a0ddae2011-08-28 00:32:23 +0200847 ath_tx_addto_baw(sc, tid, seqno);
Felix Fietkau399c6482011-09-14 21:24:17 +0200848 bf->bf_state.ndelim = ndelim;
Felix Fietkau56dc6332011-08-28 00:32:22 +0200849
850 __skb_unlink(skb, &tid->buf_q);
851 list_add_tail(&bf->list, bf_q);
Felix Fietkau399c6482011-09-14 21:24:17 +0200852 if (bf_prev)
Sujithe8324352009-01-16 21:38:42 +0530853 bf_prev->bf_next = bf;
Felix Fietkau399c6482011-09-14 21:24:17 +0200854
Sujithe8324352009-01-16 21:38:42 +0530855 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530856
Felix Fietkau56dc6332011-08-28 00:32:22 +0200857 } while (!skb_queue_empty(&tid->buf_q));
Sujithe8324352009-01-16 21:38:42 +0530858
Felix Fietkau269c44b2010-11-14 15:20:06 +0100859 *aggr_len = al;
Sujithd43f30152009-01-16 21:38:53 +0530860
Sujithe8324352009-01-16 21:38:42 +0530861 return status;
862#undef PADBYTES
863}
864
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200865/*
866 * rix - rate index
867 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
868 * width - 0 for 20 MHz, 1 for 40 MHz
869 * half_gi - to use 4us v/s 3.6 us for symbol time
870 */
871static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
872 int width, int half_gi, bool shortPreamble)
873{
874 u32 nbits, nsymbits, duration, nsymbols;
875 int streams;
876
877 /* find number of symbols: PLCP + data */
878 streams = HT_RC_2_STREAMS(rix);
879 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
880 nsymbits = bits_per_symbol[rix % 8][width] * streams;
881 nsymbols = (nbits + nsymbits - 1) / nsymbits;
882
883 if (!half_gi)
884 duration = SYMBOL_TIME(nsymbols);
885 else
886 duration = SYMBOL_TIME_HALFGI(nsymbols);
887
888 /* addup duration for legacy/ht training and signal fields */
889 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
890
891 return duration;
892}
893
Felix Fietkau493cf042011-09-14 21:24:22 +0200894static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
895 struct ath_tx_info *info, int len)
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200896{
897 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200898 struct sk_buff *skb;
899 struct ieee80211_tx_info *tx_info;
900 struct ieee80211_tx_rate *rates;
901 const struct ieee80211_rate *rate;
902 struct ieee80211_hdr *hdr;
Felix Fietkau493cf042011-09-14 21:24:22 +0200903 int i;
904 u8 rix = 0;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200905
906 skb = bf->bf_mpdu;
907 tx_info = IEEE80211_SKB_CB(skb);
908 rates = tx_info->control.rates;
909 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau493cf042011-09-14 21:24:22 +0200910
911 /* set dur_update_en for l-sig computation except for PS-Poll frames */
912 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200913
914 /*
915 * We check if Short Preamble is needed for the CTS rate by
916 * checking the BSS's global flag.
917 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
918 */
919 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
Felix Fietkau493cf042011-09-14 21:24:22 +0200920 info->rtscts_rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200921 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau493cf042011-09-14 21:24:22 +0200922 info->rtscts_rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200923
924 for (i = 0; i < 4; i++) {
925 bool is_40, is_sgi, is_sp;
926 int phy;
927
928 if (!rates[i].count || (rates[i].idx < 0))
929 continue;
930
931 rix = rates[i].idx;
Felix Fietkau493cf042011-09-14 21:24:22 +0200932 info->rates[i].Tries = rates[i].count;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200933
934 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200935 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
936 info->flags |= ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200937 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
Felix Fietkau493cf042011-09-14 21:24:22 +0200938 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
939 info->flags |= ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200940 }
941
942 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau493cf042011-09-14 21:24:22 +0200943 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200944 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
Felix Fietkau493cf042011-09-14 21:24:22 +0200945 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200946
947 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
948 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
949 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
950
951 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
952 /* MCS rates */
Felix Fietkau493cf042011-09-14 21:24:22 +0200953 info->rates[i].Rate = rix | 0x80;
954 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
955 ah->txchainmask, info->rates[i].Rate);
956 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200957 is_40, is_sgi, is_sp);
958 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
Felix Fietkau493cf042011-09-14 21:24:22 +0200959 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200960 continue;
961 }
962
963 /* legacy rates */
964 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
965 !(rate->flags & IEEE80211_RATE_ERP_G))
966 phy = WLAN_RC_PHY_CCK;
967 else
968 phy = WLAN_RC_PHY_OFDM;
969
970 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
Felix Fietkau493cf042011-09-14 21:24:22 +0200971 info->rates[i].Rate = rate->hw_value;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200972 if (rate->hw_value_short) {
973 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
Felix Fietkau493cf042011-09-14 21:24:22 +0200974 info->rates[i].Rate |= rate->hw_value_short;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200975 } else {
976 is_sp = false;
977 }
978
979 if (bf->bf_state.bfs_paprd)
Felix Fietkau493cf042011-09-14 21:24:22 +0200980 info->rates[i].ChSel = ah->txchainmask;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200981 else
Felix Fietkau493cf042011-09-14 21:24:22 +0200982 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
983 ah->txchainmask, info->rates[i].Rate);
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200984
Felix Fietkau493cf042011-09-14 21:24:22 +0200985 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200986 phy, rate->bitrate * 100, len, rix, is_sp);
987 }
988
989 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
990 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau493cf042011-09-14 21:24:22 +0200991 info->flags &= ~ATH9K_TXDESC_RTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200992
993 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
Felix Fietkau493cf042011-09-14 21:24:22 +0200994 if (info->flags & ATH9K_TXDESC_RTSENA)
995 info->flags &= ~ATH9K_TXDESC_CTSENA;
Felix Fietkau38dad7b2011-09-14 21:24:18 +0200996}
997
Felix Fietkau493cf042011-09-14 21:24:22 +0200998static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
999{
1000 struct ieee80211_hdr *hdr;
1001 enum ath9k_pkt_type htype;
1002 __le16 fc;
1003
1004 hdr = (struct ieee80211_hdr *)skb->data;
1005 fc = hdr->frame_control;
1006
1007 if (ieee80211_is_beacon(fc))
1008 htype = ATH9K_PKT_TYPE_BEACON;
1009 else if (ieee80211_is_probe_resp(fc))
1010 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1011 else if (ieee80211_is_atim(fc))
1012 htype = ATH9K_PKT_TYPE_ATIM;
1013 else if (ieee80211_is_pspoll(fc))
1014 htype = ATH9K_PKT_TYPE_PSPOLL;
1015 else
1016 htype = ATH9K_PKT_TYPE_NORMAL;
1017
1018 return htype;
1019}
1020
/*
 * Write the hardware tx descriptors for a frame or aggregate chain.
 * Common fields (rates, flags, tx power) are computed once, then each
 * buffer in the bf_next chain gets its own descriptor with per-frame
 * fields and its position within the aggregate.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	/* fields shared by every descriptor in the chain */
	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	/* fill in the rate series and protection flags */
	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	/* per-frame fields; bf_next links the subframes of an aggregate */
	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		/* link field chains this descriptor to the next subframe */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* mark the subframe's position in the aggregate */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}
1083
Sujithe8324352009-01-16 21:38:42 +05301084static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1085 struct ath_atx_tid *tid)
1086{
Sujithd43f30152009-01-16 21:38:53 +05301087 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301088 enum ATH_AGGR_STATUS status;
Felix Fietkau399c6482011-09-14 21:24:17 +02001089 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +05301090 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001091 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +05301092
1093 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +02001094 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +05301095 return;
1096
1097 INIT_LIST_HEAD(&bf_q);
1098
Felix Fietkau269c44b2010-11-14 15:20:06 +01001099 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +05301100
1101 /*
Sujithd43f30152009-01-16 21:38:53 +05301102 * no frames picked up to be aggregated;
1103 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +05301104 */
1105 if (list_empty(&bf_q))
1106 break;
1107
1108 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +05301109 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Felix Fietkau399c6482011-09-14 21:24:17 +02001110 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +05301111
Felix Fietkau55195412011-04-17 23:28:09 +02001112 if (tid->ac->clear_ps_filter) {
1113 tid->ac->clear_ps_filter = false;
Felix Fietkau399c6482011-09-14 21:24:17 +02001114 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1115 } else {
1116 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
Felix Fietkau55195412011-04-17 23:28:09 +02001117 }
1118
Sujithd43f30152009-01-16 21:38:53 +05301119 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +01001120 if (bf == bf->bf_lastbf) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001121 aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1122 bf->bf_state.bf_type = BUF_AMPDU;
1123 } else {
1124 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +05301125 }
1126
Felix Fietkau493cf042011-09-14 21:24:22 +02001127 ath_tx_fill_desc(sc, bf, txq, aggr_len);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001128 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001129 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +05301130 status != ATH_AGGR_BAW_CLOSED);
1131}
1132
Felix Fietkau231c3a12010-09-20 19:35:28 +02001133int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1134 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301135{
1136 struct ath_atx_tid *txtid;
1137 struct ath_node *an;
1138
1139 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301140 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001141
1142 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1143 return -EAGAIN;
1144
Sujithf83da962009-07-23 15:32:37 +05301145 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001146 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001147 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001148
Felix Fietkau2ed72222011-01-10 17:05:49 -07001149 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1150 txtid->baw_head = txtid->baw_tail = 0;
1151
Felix Fietkau231c3a12010-09-20 19:35:28 +02001152 return 0;
Sujithe8324352009-01-16 21:38:42 +05301153}
1154
Sujithf83da962009-07-23 15:32:37 +05301155void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +05301156{
1157 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1158 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +01001159 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +05301160
1161 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +05301162 return;
Sujithe8324352009-01-16 21:38:42 +05301163
1164 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +05301165 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +05301166 return;
Sujithe8324352009-01-16 21:38:42 +05301167 }
1168
Sujithe8324352009-01-16 21:38:42 +05301169 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001170 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +02001171
1172 /*
1173 * If frames are still being transmitted for this TID, they will be
1174 * cleaned up during tx completion. To prevent race conditions, this
1175 * TID can only be reused after all in-progress subframes have been
1176 * completed.
1177 */
1178 if (txtid->baw_head != txtid->baw_tail)
1179 txtid->state |= AGGR_CLEANUP;
1180 else
1181 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +05301182 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301183
Felix Fietkau90fa5392010-09-20 13:45:38 +02001184 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301185}
1186
Johannes Berg042ec452011-09-29 16:04:26 +02001187void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1188 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001189{
1190 struct ath_atx_tid *tid;
1191 struct ath_atx_ac *ac;
1192 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001193 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001194 int tidno;
1195
1196 for (tidno = 0, tid = &an->tid[tidno];
1197 tidno < WME_NUM_TID; tidno++, tid++) {
1198
1199 if (!tid->sched)
1200 continue;
1201
1202 ac = tid->ac;
1203 txq = ac->txq;
1204
1205 spin_lock_bh(&txq->axq_lock);
1206
Johannes Berg042ec452011-09-29 16:04:26 +02001207 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001208
1209 tid->sched = false;
1210 list_del(&tid->list);
1211
1212 if (ac->sched) {
1213 ac->sched = false;
1214 list_del(&ac->list);
1215 }
1216
1217 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001218
Johannes Berg042ec452011-09-29 16:04:26 +02001219 ieee80211_sta_set_buffered(sta, tidno, buffered);
1220 }
Felix Fietkau55195412011-04-17 23:28:09 +02001221}
1222
1223void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1224{
1225 struct ath_atx_tid *tid;
1226 struct ath_atx_ac *ac;
1227 struct ath_txq *txq;
1228 int tidno;
1229
1230 for (tidno = 0, tid = &an->tid[tidno];
1231 tidno < WME_NUM_TID; tidno++, tid++) {
1232
1233 ac = tid->ac;
1234 txq = ac->txq;
1235
1236 spin_lock_bh(&txq->axq_lock);
1237 ac->clear_ps_filter = true;
1238
Felix Fietkau56dc6332011-08-28 00:32:22 +02001239 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001240 ath_tx_queue_tid(txq, tid);
1241 ath_txq_schedule(sc, txq);
1242 }
1243
1244 spin_unlock_bh(&txq->axq_lock);
1245 }
1246}
1247
Sujithe8324352009-01-16 21:38:42 +05301248void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1249{
1250 struct ath_atx_tid *txtid;
1251 struct ath_node *an;
1252
1253 an = (struct ath_node *)sta->drv_priv;
1254
1255 if (sc->sc_flags & SC_OP_TXAGGR) {
1256 txtid = ATH_AN_2_TID(an, tid);
1257 txtid->baw_size =
1258 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1259 txtid->state |= AGGR_ADDBA_COMPLETE;
1260 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1261 ath_tx_resume_tid(sc, txtid);
1262 }
1263}
1264
Sujithe8324352009-01-16 21:38:42 +05301265/********************/
1266/* Queue Management */
1267/********************/
1268
Sujithe8324352009-01-16 21:38:42 +05301269static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1270 struct ath_txq *txq)
1271{
1272 struct ath_atx_ac *ac, *ac_tmp;
1273 struct ath_atx_tid *tid, *tid_tmp;
1274
1275 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1276 list_del(&ac->list);
1277 ac->sched = false;
1278 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1279 list_del(&tid->list);
1280 tid->sched = false;
1281 ath_tid_drain(sc, txq, tid);
1282 }
1283 }
1284}
1285
/*
 * Allocate and initialize a hardware tx queue of the given type and
 * WME subtype.  Returns the software queue state for it, or NULL when
 * the hardware has no queue available.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* map WME access categories to hardware queue subtypes */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* first-time setup of the software state for this hw queue */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* FIFO bookkeeping used by EDMA (AR9003) chips */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}
1358
Sujithe8324352009-01-16 21:38:42 +05301359int ath_txq_update(struct ath_softc *sc, int qnum,
1360 struct ath9k_tx_queue_info *qinfo)
1361{
Sujithcbe61d82009-02-09 13:27:12 +05301362 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301363 int error = 0;
1364 struct ath9k_tx_queue_info qi;
1365
1366 if (qnum == sc->beacon.beaconq) {
1367 /*
1368 * XXX: for beacon queue, we just save the parameter.
1369 * It will be picked up by ath_beaconq_config when
1370 * it's necessary.
1371 */
1372 sc->beacon.beacon_qi = *qinfo;
1373 return 0;
1374 }
1375
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001376 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301377
1378 ath9k_hw_get_txq_props(ah, qnum, &qi);
1379 qi.tqi_aifs = qinfo->tqi_aifs;
1380 qi.tqi_cwmin = qinfo->tqi_cwmin;
1381 qi.tqi_cwmax = qinfo->tqi_cwmax;
1382 qi.tqi_burstTime = qinfo->tqi_burstTime;
1383 qi.tqi_readyTime = qinfo->tqi_readyTime;
1384
1385 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001386 ath_err(ath9k_hw_common(sc->sc_ah),
1387 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301388 error = -EIO;
1389 } else {
1390 ath9k_hw_resettxqueue(ah, qnum);
1391 }
1392
1393 return error;
1394}
1395
1396int ath_cabq_update(struct ath_softc *sc)
1397{
1398 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001399 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301400 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301401
1402 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1403 /*
1404 * Ensure the readytime % is within the bounds.
1405 */
Sujith17d79042009-02-09 13:27:03 +05301406 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1407 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1408 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1409 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301410
Steve Brown9814f6b2011-02-07 17:10:39 -07001411 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301412 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301413 ath_txq_update(sc, qnum, &qi);
1414
1415 return 0;
1416}
1417
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001418static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1419{
1420 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1421 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1422}
1423
/*
 * Flush every frame on @list, completing each one with a TX-flush status.
 *
 * Called with txq->axq_lock held; the lock is dropped around each
 * completion call (which may call back into mac80211) and re-taken
 * afterwards — hence the __releases/__acquires annotations.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* Synthesize a status marking these frames as flushed, not sent. */
	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* Stale holding buffers have no frame attached; recycle. */
		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* Detach the whole (sub)frame chain for this MPDU. */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		/* Drop the lock across completion; it may re-enter mac80211. */
		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}
1463
/*
 * Drain a given TX queue (could be Beacon or Data).
 *
 * This assumes output has been stopped (TX DMA aborted) and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		/* EDMA chips: flush each per-FIFO pending list in order. */
		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	/* Reset descriptor chaining state before draining the main queue. */
	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}
1495
/*
 * Abort TX DMA and drain every set-up hardware TX queue.
 *
 * Returns true if all queues were stopped cleanly, false if some queue
 * still had frames pending in hardware after the abort.
 */
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;	/* bitmask of queues that failed to stop */

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
1537
Sujithe8324352009-01-16 21:38:42 +05301538void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1539{
1540 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1541 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1542}
1543
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 *
 * Both the AC list and each AC's TID list are serviced round-robin:
 * entries are popped from the front and re-queued at the tail if they
 * still have work left. Called with txq->axq_lock held.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	/* Nothing to do during reset, with no ACs, or with enough in HW. */
	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		/* Remember the current tail so one full round is one pass. */
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			/* Paused TIDs (ADDBA/BAR in progress) are skipped. */
			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		/* Re-queue the AC at the tail if it still has TIDs left. */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
1597
Sujithe8324352009-01-16 21:38:42 +05301598/***********/
1599/* TX, DMA */
1600/***********/
1601
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * @internal: true when re-queueing buffers already accounted for in
 * axq_depth (the depth counters are then left untouched).
 * Called with txq->axq_lock held.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;	/* write TXDP register this time? */
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA with a free FIFO slot: hand the chain to that slot. */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		/* Legacy path (or EDMA with full FIFO): software queue. */
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* Chain onto the last descriptor already queued. */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
1667
/*
 * Queue a frame belonging to an aggregation-enabled TID: either buffer
 * it in the TID's software queue for later aggregation, or send it to
 * hardware immediately as a lone A-MPDU subframe.
 * Called with txctl->txq->axq_lock held.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* ath_tx_setup_buffer() frees the skb on failure. */
	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
1713
Felix Fietkau82b873a2010-11-11 03:18:37 +01001714static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001715 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001716{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001717 struct ath_frame_info *fi = get_frame_info(skb);
1718 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301719 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001720
Felix Fietkau44f1d262011-08-28 00:32:25 +02001721 bf = fi->bf;
1722 if (!bf)
1723 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1724
1725 if (!bf)
1726 return;
1727
1728 INIT_LIST_HEAD(&bf_head);
1729 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001730 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301731
1732 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001733 if (tid)
1734 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301735
Sujithd43f30152009-01-16 21:38:53 +05301736 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001737 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001738 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301739 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001740}
1741
/*
 * Populate the per-frame ath_frame_info (key index, key type, frame
 * length including FCS/ICV) stored in the skb's control buffer.
 *
 * NOTE(review): fi appears to live in the skb cb area that overlaps
 * tx_info->control, so sta/hw_key and the crypto key type must be read
 * BEFORE the memset below clobbers them — do not reorder. (Presumed
 * from get_frame_info(); confirm against its definition.)
 */
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;

	/* Must be derived before fi is zeroed (see note above). */
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		/* Use the node's powersave key for buffered data frames. */
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
}
1768
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301769u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1770{
1771 struct ath_hw *ah = sc->sc_ah;
1772 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301773 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1774 (curchan->channelFlags & CHANNEL_5GHZ) &&
1775 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301776 return 0x3;
1777 else
1778 return chainmask;
1779}
1780
/*
 * Assign a descriptor (and sequence number if necessary),
 * and map buffer for DMA. Frees skb on error.
 *
 * Returns the prepared ath_buf, or NULL on failure (no free buffer or
 * DMA mapping error) — in which case the skb has already been freed.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	/* Aggregation TID: stamp the next sequence number into the header. */
	if (tid) {
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Detach the skb before recycling the buffer. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}
1832
/* FIXME: tx power */
/*
 * Hand a frame to the DMA path: route QoS data frames of
 * aggregation-enabled stations through the A-MPDU path, everything
 * else through the normal (non-aggregated) path.
 * Takes and releases txctl->txq->axq_lock.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	/* Resolve the TID only for QoS data to a known, aggr-capable node. */
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
		ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		/* ath_tx_setup_buffer() frees the skb on failure. */
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			goto out;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}

out:
	spin_unlock_bh(&txctl->txq->axq_lock);
}
1875
/* Upon failure caller should free skb */
/*
 * Main mac80211 transmit entry point: assign sequence numbers where
 * requested, insert MAC-header padding, fill per-frame info, apply
 * queue flow control, then hand the frame to the DMA path.
 * Returns 0 on success or -ENOMEM if headroom for padding is missing.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	/* Hardware encryption appends an ICV; count it in the air length. */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		/* The header moved; refresh the pointer. */
		hdr = (struct ieee80211_hdr *) skb->data;
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	/* Stop the mac80211 queue when the driver queue grows too deep. */
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, skb, txctl);
	return 0;
}
1945
Sujithe8324352009-01-16 21:38:42 +05301946/*****************/
1947/* TX Completion */
1948/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001949
/*
 * Final per-skb completion: undo header padding, update powersave
 * state, rebalance queue flow control, and return the frame (with TX
 * status) to mac80211.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* Last pending frame acknowledged: allow the chip to sleep again. */
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	/* Wake the mac80211 queue once the backlog has drained enough. */
	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}
2001
/*
 * Complete a single transmit buffer: unmap the DMA buffer, dispose of
 * the skb (PAPRD calibration frames are handled specially), and return
 * the ath_buf chain to the free list.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD frame: free it if the waiter timed out, else signal
		 * the calibration completion. */
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
2043
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002044static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2045 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002046 int txok)
Sujithc4288392008-11-18 09:09:30 +05302047{
Sujitha22be222009-03-30 15:28:36 +05302048 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302049 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302050 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002051 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002052 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302053 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302054
Sujith95e4acb2009-03-13 08:56:09 +05302055 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002056 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302057
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002058 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302059 WARN_ON(tx_rateindex >= hw->max_rates);
2060
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002061 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002062 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302063
Felix Fietkaub572d032010-11-14 15:20:07 +01002064 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002065 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302066 tx_info->status.ampdu_len = nframes;
2067 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002068
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002069 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002070 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002071 /*
2072 * If an underrun error is seen assume it as an excessive
2073 * retry only if max frame trigger level has been reached
2074 * (2 KB for single stream, and 4 KB for dual stream).
2075 * Adjust the long retry as if the frame was tried
2076 * hw->max_rate_tries times to affect how rate control updates
2077 * PER for the failed rate.
2078 * In case of congestion on the bus penalizing this type of
2079 * underruns should help hardware actually transmit new frames
2080 * successfully by eventually preferring slower rates.
2081 * This itself should also alleviate congestion on the bus.
2082 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002083 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2084 ATH9K_TX_DELIM_UNDERRUN)) &&
2085 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002086 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002087 tx_info->status.rates[tx_rateindex].count =
2088 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302089 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302090
Felix Fietkau545750d2009-11-23 22:21:01 +01002091 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302092 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002093 tx_info->status.rates[i].idx = -1;
2094 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302095
Felix Fietkau78c46532010-06-25 01:26:16 +02002096 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302097}
2098
/*
 * Complete one (possibly aggregate) frame: update txq depth accounting,
 * then hand the frame back to the rate-control/aggregation code and to
 * mac80211.
 *
 * Called with txq->axq_lock held; the lock is dropped around the
 * completion handlers and re-acquired before returning — hence the
 * sparse __releases/__acquires annotations.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	/* Frame succeeded iff no error bits are set in the hw status. */
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		/* Non-aggregate: a single subframe; nbad is 1 on failure. */
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	/* Completion may have freed hw slots; let pending tids transmit. */
	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}
2126
/*
 * Reap completed descriptors from a legacy (non-EDMA) tx queue.
 *
 * Walks txq->axq_q from the head, asking the hardware for the status of
 * each frame's last descriptor, and completes frames until one is still
 * in progress.  Maintains the "holding descriptor" protocol described in
 * the comment inside the loop.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		/* A chip reset is queued; don't touch the hardware. */
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			/* Only the holding descriptor left: nothing to reap. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			/* The real head frame follows the holding desc. */
			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		/* Hw status is read from the frame's last descriptor. */
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be recycled. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		/* Drops and re-takes axq_lock internally. */
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}
2202
Sujith305fe472009-07-23 15:32:29 +05302203static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002204{
2205 struct ath_softc *sc = container_of(work, struct ath_softc,
2206 tx_complete_work.work);
2207 struct ath_txq *txq;
2208 int i;
2209 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002210#ifdef CONFIG_ATH9K_DEBUGFS
2211 sc->tx_complete_poll_work_seen++;
2212#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002213
2214 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2215 if (ATH_TXQ_SETUP(sc, i)) {
2216 txq = &sc->tx.txq[i];
2217 spin_lock_bh(&txq->axq_lock);
2218 if (txq->axq_depth) {
2219 if (txq->axq_tx_inprogress) {
2220 needreset = true;
2221 spin_unlock_bh(&txq->axq_lock);
2222 break;
2223 } else {
2224 txq->axq_tx_inprogress = true;
2225 }
2226 }
2227 spin_unlock_bh(&txq->axq_lock);
2228 }
2229
2230 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002231 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2232 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002233 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002234 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002235 }
2236
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002237 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002238 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2239}
2240
2241
Sujithe8324352009-01-16 21:38:42 +05302242
2243void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002244{
Sujithe8324352009-01-16 21:38:42 +05302245 int i;
2246 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002247
Sujithe8324352009-01-16 21:38:42 +05302248 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002249
2250 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302251 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2252 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002253 }
2254}
2255
/*
 * Tasklet entry point for EDMA (AR93xx-family) tx completion.
 *
 * EDMA hardware reports completions through a separate status ring
 * rather than in the frame descriptors, so statuses are popped one by
 * one (ath9k_hw_txprocdesc with a NULL descriptor) and matched against
 * the software FIFO of the queue named in each status entry.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		/* A chip reset is queued; don't touch the hardware. */
		if (work_pending(&sc->hw_reset_work))
			break;

		/* Pop the next entry from the tx status ring. */
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		/* The status entry tells us which queue it belongs to. */
		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		/* Detach the completed frame from the current FIFO slot. */
		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* Slot drained: advance to the next FIFO slot. */
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			/* Push any software-queued frames into the freed
			 * FIFO slot. */
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		/* Drops and re-takes axq_lock internally. */
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}
2317
Sujithe8324352009-01-16 21:38:42 +05302318/*****************/
2319/* Init, Cleanup */
2320/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002321
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002322static int ath_txstatus_setup(struct ath_softc *sc, int size)
2323{
2324 struct ath_descdma *dd = &sc->txsdma;
2325 u8 txs_len = sc->sc_ah->caps.txs_len;
2326
2327 dd->dd_desc_len = size * txs_len;
2328 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2329 &dd->dd_desc_paddr, GFP_KERNEL);
2330 if (!dd->dd_desc)
2331 return -ENOMEM;
2332
2333 return 0;
2334}
2335
2336static int ath_tx_edma_init(struct ath_softc *sc)
2337{
2338 int err;
2339
2340 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2341 if (!err)
2342 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2343 sc->txsdma.dd_desc_paddr,
2344 ATH_TXSTATUS_RING_SIZE);
2345
2346 return err;
2347}
2348
2349static void ath_tx_edma_cleanup(struct ath_softc *sc)
2350{
2351 struct ath_descdma *dd = &sc->txsdma;
2352
2353 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2354 dd->dd_desc_paddr);
2355}
2356
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357int ath_tx_init(struct ath_softc *sc, int nbufs)
2358{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002359 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360 int error = 0;
2361
Sujith797fe5cb2009-03-30 15:28:45 +05302362 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363
Sujith797fe5cb2009-03-30 15:28:45 +05302364 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002365 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302366 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002367 ath_err(common,
2368 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302369 goto err;
2370 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371
Sujith797fe5cb2009-03-30 15:28:45 +05302372 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002373 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302374 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002375 ath_err(common,
2376 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302377 goto err;
2378 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002380 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2381
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002382 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2383 error = ath_tx_edma_init(sc);
2384 if (error)
2385 goto err;
2386 }
2387
Sujith797fe5cb2009-03-30 15:28:45 +05302388err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389 if (error != 0)
2390 ath_tx_cleanup(sc);
2391
2392 return error;
2393}
2394
Sujith797fe5cb2009-03-30 15:28:45 +05302395void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002396{
Sujithb77f4832008-12-07 21:44:03 +05302397 if (sc->beacon.bdma.dd_desc_len != 0)
2398 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002399
Sujithb77f4832008-12-07 21:44:03 +05302400 if (sc->tx.txdma.dd_desc_len != 0)
2401 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002402
2403 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2404 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405}
2406
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2408{
Sujithc5170162008-10-29 10:13:59 +05302409 struct ath_atx_tid *tid;
2410 struct ath_atx_ac *ac;
2411 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412
Sujith8ee5afb2008-12-07 21:43:36 +05302413 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302414 tidno < WME_NUM_TID;
2415 tidno++, tid++) {
2416 tid->an = an;
2417 tid->tidno = tidno;
2418 tid->seq_start = tid->seq_next = 0;
2419 tid->baw_size = WME_MAX_BA;
2420 tid->baw_head = tid->baw_tail = 0;
2421 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302422 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302423 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002424 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302425 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302426 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302427 tid->state &= ~AGGR_ADDBA_COMPLETE;
2428 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302429 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002430
Sujith8ee5afb2008-12-07 21:43:36 +05302431 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302432 acno < WME_NUM_AC; acno++, ac++) {
2433 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002434 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302435 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436 }
2437}
2438
/*
 * Tear down per-node tx state when a station goes away: unschedule each
 * of the node's TIDs and ACs, drop any frames still queued on them, and
 * clear the aggregation state flags.  Done under the owning txq's lock,
 * one tid at a time.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		/* Remove the tid from its AC's pending-tid list. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Remove the AC from the txq's schedule list. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		/* Drop any frames still queued on this tid. */
		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}