blob: cc595712f5180479c39871bd0f18af6ff2b5be2c [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujith Manoharan5b681382011-05-17 13:36:18 +05302 * Copyright (c) 2008-2011 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Alexey Dobriyanb7f080c2011-06-16 11:01:34 +000017#include <linux/dma-mapping.h>
Sujith394cf0a2009-02-09 13:26:54 +053018#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040019#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070020
21#define BITS_PER_BYTE 8
22#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070023#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8
25#define L_LTF 8
26#define L_SIG 4
27#define HT_SIG 8
28#define HT_STF 4
29#define HT_LTF(_ns) (4 * (_ns))
30#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
31#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070035
Felix Fietkauc6663872010-04-19 19:57:33 +020036static u16 bits_per_symbol[][2] = {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070037 /* 20MHz 40MHz */
38 { 26, 54 }, /* 0: BPSK */
39 { 52, 108 }, /* 1: QPSK 1/2 */
40 { 78, 162 }, /* 2: QPSK 3/4 */
41 { 104, 216 }, /* 3: 16-QAM 1/2 */
42 { 156, 324 }, /* 4: 16-QAM 3/4 */
43 { 208, 432 }, /* 5: 64-QAM 2/3 */
44 { 234, 486 }, /* 6: 64-QAM 3/4 */
45 { 260, 540 }, /* 7: 64-QAM 5/6 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070046};
47
48#define IS_HT_RATE(_rate) ((_rate) & 0x80)
49
Felix Fietkau82b873a2010-11-11 03:18:37 +010050static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +010052 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053053static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070054 struct ath_txq *txq, struct list_head *bf_q,
55 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053056static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +020057 struct list_head *head, bool internal);
Felix Fietkau269c44b2010-11-14 15:20:06 +010058static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010059static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
60 struct ath_tx_status *ts, int nframes, int nbad,
61 int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020062static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno);
Sujithe8324352009-01-16 21:38:42 +053064
Felix Fietkau545750d2009-11-23 22:21:01 +010065enum {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020066 MCS_HT20,
67 MCS_HT20_SGI,
Felix Fietkau545750d2009-11-23 22:21:01 +010068 MCS_HT40,
69 MCS_HT40_SGI,
70};
71
Felix Fietkau0e668cd2010-04-19 19:57:32 +020072static int ath_max_4ms_framelen[4][32] = {
73 [MCS_HT20] = {
74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010084 },
85 [MCS_HT40] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020086 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010090 },
91 [MCS_HT40_SGI] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020092 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010096 }
97};
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
Sujithe8324352009-01-16 21:38:42 +0530123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124{
Felix Fietkau066dae92010-11-07 14:59:39 +0100125 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530126
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200127 WARN_ON(!tid->paused);
128
Sujithe8324352009-01-16 21:38:42 +0530129 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200130 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530131
132 if (list_empty(&tid->buf_q))
133 goto unlock;
134
135 ath_tx_queue_tid(txq, tid);
136 ath_txq_schedule(sc, txq);
137unlock:
138 spin_unlock_bh(&txq->axq_lock);
139}
140
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100147}
148
Sujithe8324352009-01-16 21:38:42 +0530149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{
Felix Fietkau066dae92010-11-07 14:59:39 +0100151 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530152 struct ath_buf *bf;
153 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200154 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100155 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200156
Sujithe8324352009-01-16 21:38:42 +0530157 INIT_LIST_HEAD(&bf_head);
158
Felix Fietkau90fa5392010-09-20 13:45:38 +0200159 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530160 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530161
162 while (!list_empty(&tid->buf_q)) {
163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530164 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200165
Felix Fietkaue1566d12010-11-20 03:08:46 +0100166 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau7d2c16b2011-03-12 01:11:28 +0100170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200171 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700172 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200173 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100174 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530175 }
176
177 spin_unlock_bh(&txq->axq_lock);
178}
179
180static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
181 int seqno)
182{
183 int index, cindex;
184
185 index = ATH_BA_INDEX(tid->seq_start, seqno);
186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
187
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200188 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530189
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200190 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530191 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
192 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
193 }
194}
195
196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100197 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530198{
199 int index, cindex;
200
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100201 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200203 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530204
205 if (index >= ((tid->baw_tail - tid->baw_head) &
206 (ATH_TID_MAX_BUFS - 1))) {
207 tid->baw_tail = cindex;
208 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
209 }
210}
211
212/*
213 * TODO: For frame(s) that are in the retry state, we will reuse the
214 * sequence number(s) without setting the retry bit. The
215 * alternative is to give up on these and BAR the receiver's window
216 * forward.
217 */
218static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
219 struct ath_atx_tid *tid)
220
221{
222 struct ath_buf *bf;
223 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700224 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100225 struct ath_frame_info *fi;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700226
227 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530228 INIT_LIST_HEAD(&bf_head);
229
230 for (;;) {
231 if (list_empty(&tid->buf_q))
232 break;
Sujithe8324352009-01-16 21:38:42 +0530233
Sujithd43f30152009-01-16 21:38:53 +0530234 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
235 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530236
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100237 fi = get_frame_info(bf->bf_mpdu);
238 if (fi->retries)
239 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530240
241 spin_unlock(&txq->axq_lock);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700242 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530243 spin_lock(&txq->axq_lock);
244 }
245
246 tid->seq_next = tid->seq_start;
247 tid->baw_tail = tid->baw_head;
248}
249
Sujithfec247c2009-07-27 12:08:16 +0530250static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100251 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +0530252{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100253 struct ath_frame_info *fi = get_frame_info(skb);
Sujithe8324352009-01-16 21:38:42 +0530254 struct ieee80211_hdr *hdr;
255
Sujithfec247c2009-07-27 12:08:16 +0530256 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100257 if (fi->retries++ > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100258 return;
Sujithe8324352009-01-16 21:38:42 +0530259
Sujithe8324352009-01-16 21:38:42 +0530260 hdr = (struct ieee80211_hdr *)skb->data;
261 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
262}
263
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200264static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
265{
266 struct ath_buf *bf = NULL;
267
268 spin_lock_bh(&sc->tx.txbuflock);
269
270 if (unlikely(list_empty(&sc->tx.txbuf))) {
271 spin_unlock_bh(&sc->tx.txbuflock);
272 return NULL;
273 }
274
275 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
276 list_del(&bf->list);
277
278 spin_unlock_bh(&sc->tx.txbuflock);
279
280 return bf;
281}
282
283static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
284{
285 spin_lock_bh(&sc->tx.txbuflock);
286 list_add_tail(&bf->list, &sc->tx.txbuf);
287 spin_unlock_bh(&sc->tx.txbuflock);
288}
289
Sujithd43f30152009-01-16 21:38:53 +0530290static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
291{
292 struct ath_buf *tbf;
293
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200294 tbf = ath_tx_get_buffer(sc);
295 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530296 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530297
298 ATH_TXBUF_RESET(tbf);
299
300 tbf->bf_mpdu = bf->bf_mpdu;
301 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400302 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530303 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530304
305 return tbf;
306}
307
Felix Fietkaub572d032010-11-14 15:20:07 +0100308static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
309 struct ath_tx_status *ts, int txok,
310 int *nframes, int *nbad)
311{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100312 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100313 u16 seq_st = 0;
314 u32 ba[WME_BA_BMP_SIZE >> 5];
315 int ba_index;
316 int isaggr = 0;
317
318 *nbad = 0;
319 *nframes = 0;
320
Felix Fietkaub572d032010-11-14 15:20:07 +0100321 isaggr = bf_isaggr(bf);
322 if (isaggr) {
323 seq_st = ts->ts_seqnum;
324 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
325 }
326
327 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100328 fi = get_frame_info(bf->bf_mpdu);
329 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100330
331 (*nframes)++;
332 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
333 (*nbad)++;
334
335 bf = bf->bf_next;
336 }
337}
338
339
Sujithd43f30152009-01-16 21:38:53 +0530340static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
341 struct ath_buf *bf, struct list_head *bf_q,
Felix Fietkauc5992612010-11-14 15:20:09 +0100342 struct ath_tx_status *ts, int txok, bool retry)
Sujithe8324352009-01-16 21:38:42 +0530343{
344 struct ath_node *an = NULL;
345 struct sk_buff *skb;
Sujith1286ec62009-01-27 13:30:37 +0530346 struct ieee80211_sta *sta;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100347 struct ieee80211_hw *hw = sc->hw;
Sujith1286ec62009-01-27 13:30:37 +0530348 struct ieee80211_hdr *hdr;
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800349 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +0530350 struct ath_atx_tid *tid = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530351 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +0530352 struct list_head bf_head, bf_pending;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530353 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
Sujithe8324352009-01-16 21:38:42 +0530354 u32 ba[WME_BA_BMP_SIZE >> 5];
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530355 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
356 bool rc_update = true;
Felix Fietkau78c46532010-06-25 01:26:16 +0200357 struct ieee80211_tx_rate rates[4];
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100358 struct ath_frame_info *fi;
Björn Smedmanebd02282010-10-10 22:44:39 +0200359 int nframes;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100360 u8 tidno;
Felix Fietkau55195412011-04-17 23:28:09 +0200361 bool clear_filter;
Sujithe8324352009-01-16 21:38:42 +0530362
Sujitha22be222009-03-30 15:28:36 +0530363 skb = bf->bf_mpdu;
Sujith1286ec62009-01-27 13:30:37 +0530364 hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +0530365
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800366 tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800367
Felix Fietkau78c46532010-06-25 01:26:16 +0200368 memcpy(rates, tx_info->control.rates, sizeof(rates));
369
Sujith1286ec62009-01-27 13:30:37 +0530370 rcu_read_lock();
371
Ben Greear686b9cb2010-09-23 09:44:36 -0700372 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
Sujith1286ec62009-01-27 13:30:37 +0530373 if (!sta) {
374 rcu_read_unlock();
Felix Fietkau73e19462010-07-07 19:42:09 +0200375
Felix Fietkau31e79a52010-07-12 23:16:34 +0200376 INIT_LIST_HEAD(&bf_head);
377 while (bf) {
378 bf_next = bf->bf_next;
379
380 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaufce041b2011-05-19 12:20:25 +0200381 if (!bf->bf_stale || bf_next != NULL)
Felix Fietkau31e79a52010-07-12 23:16:34 +0200382 list_move_tail(&bf->list, &bf_head);
383
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100384 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
Felix Fietkau31e79a52010-07-12 23:16:34 +0200385 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
386 0, 0);
387
388 bf = bf_next;
389 }
Sujith1286ec62009-01-27 13:30:37 +0530390 return;
Sujithe8324352009-01-16 21:38:42 +0530391 }
392
Sujith1286ec62009-01-27 13:30:37 +0530393 an = (struct ath_node *)sta->drv_priv;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100394 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
395 tid = ATH_AN_2_TID(an, tidno);
Sujith1286ec62009-01-27 13:30:37 +0530396
Felix Fietkaub11b1602010-07-11 12:48:44 +0200397 /*
398 * The hardware occasionally sends a tx status for the wrong TID.
399 * In this case, the BA status cannot be considered valid and all
400 * subframes need to be retransmitted
401 */
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100402 if (tidno != ts->tid)
Felix Fietkaub11b1602010-07-11 12:48:44 +0200403 txok = false;
404
Sujithe8324352009-01-16 21:38:42 +0530405 isaggr = bf_isaggr(bf);
Sujithd43f30152009-01-16 21:38:53 +0530406 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530407
Sujithd43f30152009-01-16 21:38:53 +0530408 if (isaggr && txok) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700409 if (ts->ts_flags & ATH9K_TX_BA) {
410 seq_st = ts->ts_seqnum;
411 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530412 } else {
Sujithd43f30152009-01-16 21:38:53 +0530413 /*
414 * AR5416 can become deaf/mute when BA
415 * issue happens. Chip needs to be reset.
416 * But AP code may have sychronization issues
417 * when perform internal reset in this routine.
418 * Only enable reset in STA mode for now.
419 */
Sujith2660b812009-02-09 13:27:26 +0530420 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
Sujithd43f30152009-01-16 21:38:53 +0530421 needreset = 1;
Sujithe8324352009-01-16 21:38:42 +0530422 }
423 }
424
425 INIT_LIST_HEAD(&bf_pending);
426 INIT_LIST_HEAD(&bf_head);
427
Felix Fietkaub572d032010-11-14 15:20:07 +0100428 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
Sujithe8324352009-01-16 21:38:42 +0530429 while (bf) {
Felix Fietkauf0b82202011-01-15 14:30:15 +0100430 txfail = txpending = sendbar = 0;
Sujithe8324352009-01-16 21:38:42 +0530431 bf_next = bf->bf_next;
432
Felix Fietkau78c46532010-06-25 01:26:16 +0200433 skb = bf->bf_mpdu;
434 tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100435 fi = get_frame_info(skb);
Felix Fietkau78c46532010-06-25 01:26:16 +0200436
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100437 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
Sujithe8324352009-01-16 21:38:42 +0530438 /* transmit completion, subframe is
439 * acked by block ack */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530440 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530441 } else if (!isaggr && txok) {
442 /* transmit completion */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530443 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530444 } else {
Felix Fietkau55195412011-04-17 23:28:09 +0200445 if ((tid->state & AGGR_CLEANUP) || !retry) {
Sujithe8324352009-01-16 21:38:42 +0530446 /*
447 * cleanup in progress, just fail
448 * the un-acked sub-frames
449 */
450 txfail = 1;
Felix Fietkau55195412011-04-17 23:28:09 +0200451 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
452 if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
453 !an->sleeping)
454 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
455
456 clear_filter = true;
457 txpending = 1;
458 } else {
459 bf->bf_state.bf_type |= BUF_XRETRY;
460 txfail = 1;
461 sendbar = 1;
462 txfail_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530463 }
464 }
465
Felix Fietkaufce041b2011-05-19 12:20:25 +0200466 /*
467 * Make sure the last desc is reclaimed if it
468 * not a holding desc.
469 */
470 if (!bf_last->bf_stale || bf_next != NULL)
Sujithd43f30152009-01-16 21:38:53 +0530471 list_move_tail(&bf->list, &bf_head);
Felix Fietkaufce041b2011-05-19 12:20:25 +0200472 else
473 INIT_LIST_HEAD(&bf_head);
Sujithe8324352009-01-16 21:38:42 +0530474
Felix Fietkau90fa5392010-09-20 13:45:38 +0200475 if (!txpending || (tid->state & AGGR_CLEANUP)) {
Sujithe8324352009-01-16 21:38:42 +0530476 /*
477 * complete the acked-ones/xretried ones; update
478 * block-ack window
479 */
480 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100481 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530482 spin_unlock_bh(&txq->axq_lock);
483
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530484 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
Felix Fietkau78c46532010-06-25 01:26:16 +0200485 memcpy(tx_info->control.rates, rates, sizeof(rates));
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100486 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530487 rc_update = false;
488 } else {
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530490 }
491
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700492 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
493 !txfail, sendbar);
Sujithe8324352009-01-16 21:38:42 +0530494 } else {
Sujithd43f30152009-01-16 21:38:53 +0530495 /* retry the un-acked ones */
Felix Fietkau55195412011-04-17 23:28:09 +0200496 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400497 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
498 if (bf->bf_next == NULL && bf_last->bf_stale) {
499 struct ath_buf *tbf;
Sujithe8324352009-01-16 21:38:42 +0530500
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400501 tbf = ath_clone_txbuf(sc, bf_last);
502 /*
503 * Update tx baw and complete the
504 * frame with failed status if we
505 * run out of tx buf.
506 */
507 if (!tbf) {
508 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100509 ath_tx_update_baw(sc, tid, fi->seqno);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400510 spin_unlock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400511
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400512 bf->bf_state.bf_type |=
513 BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100514 ath_tx_rc_status(sc, bf, ts, nframes,
Felix Fietkaub572d032010-11-14 15:20:07 +0100515 nbad, 0, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400516 ath_tx_complete_buf(sc, bf, txq,
517 &bf_head,
518 ts, 0, 0);
519 break;
520 }
521
522 ath9k_hw_cleartxdesc(sc->sc_ah,
523 tbf->bf_desc);
524 list_add_tail(&tbf->list, &bf_head);
525 } else {
526 /*
527 * Clear descriptor status words for
528 * software retry
529 */
530 ath9k_hw_cleartxdesc(sc->sc_ah,
531 bf->bf_desc);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400532 }
Sujithe8324352009-01-16 21:38:42 +0530533 }
534
535 /*
536 * Put this buffer to the temporary pending
537 * queue to retain ordering
538 */
539 list_splice_tail_init(&bf_head, &bf_pending);
540 }
541
542 bf = bf_next;
543 }
544
Felix Fietkau4cee7862010-07-23 03:53:16 +0200545 /* prepend un-acked frames to the beginning of the pending frame queue */
546 if (!list_empty(&bf_pending)) {
Felix Fietkau55195412011-04-17 23:28:09 +0200547 if (an->sleeping)
548 ieee80211_sta_set_tim(sta);
549
Felix Fietkau4cee7862010-07-23 03:53:16 +0200550 spin_lock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +0200551 if (clear_filter)
552 tid->ac->clear_ps_filter = true;
Felix Fietkau4cee7862010-07-23 03:53:16 +0200553 list_splice(&bf_pending, &tid->buf_q);
554 ath_tx_queue_tid(txq, tid);
555 spin_unlock_bh(&txq->axq_lock);
556 }
557
Sujithe8324352009-01-16 21:38:42 +0530558 if (tid->state & AGGR_CLEANUP) {
Felix Fietkau90fa5392010-09-20 13:45:38 +0200559 ath_tx_flush_tid(sc, tid);
560
Sujithe8324352009-01-16 21:38:42 +0530561 if (tid->baw_head == tid->baw_tail) {
562 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530563 tid->state &= ~AGGR_CLEANUP;
Sujithd43f30152009-01-16 21:38:53 +0530564 }
Sujithe8324352009-01-16 21:38:42 +0530565 }
566
Sujith1286ec62009-01-27 13:30:37 +0530567 rcu_read_unlock();
568
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +0530569 if (needreset)
Sujithe8324352009-01-16 21:38:42 +0530570 ath_reset(sc, false);
Sujithe8324352009-01-16 21:38:42 +0530571}
572
573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
574 struct ath_atx_tid *tid)
575{
Sujithe8324352009-01-16 21:38:42 +0530576 struct sk_buff *skb;
577 struct ieee80211_tx_info *tx_info;
578 struct ieee80211_tx_rate *rates;
Sujithd43f30152009-01-16 21:38:53 +0530579 u32 max_4ms_framelen, frmlen;
Sujith4ef70842009-07-23 15:32:41 +0530580 u16 aggr_limit, legacy = 0;
Sujithe8324352009-01-16 21:38:42 +0530581 int i;
582
Sujitha22be222009-03-30 15:28:36 +0530583 skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +0530584 tx_info = IEEE80211_SKB_CB(skb);
585 rates = tx_info->control.rates;
Sujithe8324352009-01-16 21:38:42 +0530586
587 /*
588 * Find the lowest frame length among the rate series that will have a
589 * 4ms transmit duration.
590 * TODO - TXOP limit needs to be considered.
591 */
592 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
593
594 for (i = 0; i < 4; i++) {
595 if (rates[i].count) {
Felix Fietkau545750d2009-11-23 22:21:01 +0100596 int modeidx;
597 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
Sujithe8324352009-01-16 21:38:42 +0530598 legacy = 1;
599 break;
600 }
601
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200602 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau545750d2009-11-23 22:21:01 +0100603 modeidx = MCS_HT40;
604 else
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200605 modeidx = MCS_HT20;
606
607 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
608 modeidx++;
Felix Fietkau545750d2009-11-23 22:21:01 +0100609
610 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
Sujithd43f30152009-01-16 21:38:53 +0530611 max_4ms_framelen = min(max_4ms_framelen, frmlen);
Sujithe8324352009-01-16 21:38:42 +0530612 }
613 }
614
615 /*
616 * limit aggregate size by the minimum rate if rate selected is
617 * not a probe rate, if rate selected is a probe rate then
618 * avoid aggregation of this packet.
619 */
620 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
621 return 0;
622
Vasanthakumar Thiagarajan17739122009-08-26 21:08:50 +0530623 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
624 aggr_limit = min((max_4ms_framelen * 3) / 8,
625 (u32)ATH_AMPDU_LIMIT_MAX);
626 else
627 aggr_limit = min(max_4ms_framelen,
628 (u32)ATH_AMPDU_LIMIT_MAX);
Sujithe8324352009-01-16 21:38:42 +0530629
630 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300631 * h/w can accept aggregates up to 16 bit lengths (65535).
632 * The IE, however can hold up to 65536, which shows up here
Sujithe8324352009-01-16 21:38:42 +0530633 * as zero. Ignore 65536 since we are constrained by hw.
634 */
Sujith4ef70842009-07-23 15:32:41 +0530635 if (tid->an->maxampdu)
636 aggr_limit = min(aggr_limit, tid->an->maxampdu);
Sujithe8324352009-01-16 21:38:42 +0530637
638 return aggr_limit;
639}
640
641/*
Sujithd43f30152009-01-16 21:38:53 +0530642 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530643 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530644 */
645static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
646 struct ath_buf *bf, u16 frmlen)
647{
Sujithe8324352009-01-16 21:38:42 +0530648 struct sk_buff *skb = bf->bf_mpdu;
649 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530650 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530651 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100652 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200653 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100654 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530655
656 /* Select standard number of delimiters based on frame length alone */
657 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
658
659 /*
660 * If encryption enabled, hardware requires some more padding between
661 * subframes.
662 * TODO - this could be improved to be dependent on the rate.
663 * The hardware can keep up at lower rates, but not higher rates
664 */
Rajkumar Manoharan4f6760b2011-07-01 18:37:33 +0530665 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
666 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
Sujithe8324352009-01-16 21:38:42 +0530667 ndelim += ATH_AGGR_ENCRYPTDELIM;
668
669 /*
670 * Convert desired mpdu density from microeconds to bytes based
671 * on highest rate in rate series (i.e. first rate) to determine
672 * required minimum length for subframe. Take into account
673 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530674 *
Sujithe8324352009-01-16 21:38:42 +0530675 * If there is no mpdu density restriction, no further calculation
676 * is needed.
677 */
Sujith4ef70842009-07-23 15:32:41 +0530678
679 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530680 return ndelim;
681
682 rix = tx_info->control.rates[0].idx;
683 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530684 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
685 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
686
687 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530688 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530689 else
Sujith4ef70842009-07-23 15:32:41 +0530690 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530691
692 if (nsymbols == 0)
693 nsymbols = 1;
694
Felix Fietkauc6663872010-04-19 19:57:33 +0200695 streams = HT_RC_2_STREAMS(rix);
696 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530697 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
698
Sujithe8324352009-01-16 21:38:42 +0530699 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530700 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
701 ndelim = max(mindelim, ndelim);
702 }
703
704 return ndelim;
705}
706
707static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530708 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530709 struct ath_atx_tid *tid,
Felix Fietkau269c44b2010-11-14 15:20:06 +0100710 struct list_head *bf_q,
711 int *aggr_len)
Sujithe8324352009-01-16 21:38:42 +0530712{
713#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Sujithd43f30152009-01-16 21:38:53 +0530714 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
715 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530716 u16 aggr_limit = 0, al = 0, bpad = 0,
717 al_delta, h_baw = tid->baw_size / 2;
718 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Felix Fietkau0299a502010-10-21 02:47:24 +0200719 struct ieee80211_tx_info *tx_info;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100720 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530721
722 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
723
724 do {
725 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100726 fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530727
Sujithd43f30152009-01-16 21:38:53 +0530728 /* do not step over block-ack window */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100729 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
Sujithe8324352009-01-16 21:38:42 +0530730 status = ATH_AGGR_BAW_CLOSED;
731 break;
732 }
733
734 if (!rl) {
735 aggr_limit = ath_lookup_rate(sc, bf, tid);
736 rl = 1;
737 }
738
Sujithd43f30152009-01-16 21:38:53 +0530739 /* do not exceed aggregation limit */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100740 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
Sujithe8324352009-01-16 21:38:42 +0530741
Sujithd43f30152009-01-16 21:38:53 +0530742 if (nframes &&
743 (aggr_limit < (al + bpad + al_delta + prev_al))) {
Sujithe8324352009-01-16 21:38:42 +0530744 status = ATH_AGGR_LIMITED;
745 break;
746 }
747
Felix Fietkau0299a502010-10-21 02:47:24 +0200748 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
749 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
750 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
751 break;
752
Sujithd43f30152009-01-16 21:38:53 +0530753 /* do not exceed subframe limit */
754 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530755 status = ATH_AGGR_LIMITED;
756 break;
757 }
Sujithd43f30152009-01-16 21:38:53 +0530758 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530759
Sujithd43f30152009-01-16 21:38:53 +0530760 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530761 al += bpad + al_delta;
762
763 /*
764 * Get the delimiters needed to meet the MPDU
765 * density for this node.
766 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100767 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530768 bpad = PADBYTES(al_delta) + (ndelim << 2);
769
770 bf->bf_next = NULL;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400771 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
Sujithe8324352009-01-16 21:38:42 +0530772
Sujithd43f30152009-01-16 21:38:53 +0530773 /* link buffers of this frame to the aggregate */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100774 if (!fi->retries)
775 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithd43f30152009-01-16 21:38:53 +0530776 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
777 list_move_tail(&bf->list, bf_q);
Sujithe8324352009-01-16 21:38:42 +0530778 if (bf_prev) {
779 bf_prev->bf_next = bf;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400780 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
781 bf->bf_daddr);
Sujithe8324352009-01-16 21:38:42 +0530782 }
783 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530784
Sujithe8324352009-01-16 21:38:42 +0530785 } while (!list_empty(&tid->buf_q));
786
Felix Fietkau269c44b2010-11-14 15:20:06 +0100787 *aggr_len = al;
Sujithd43f30152009-01-16 21:38:53 +0530788
Sujithe8324352009-01-16 21:38:42 +0530789 return status;
790#undef PADBYTES
791}
792
793static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
794 struct ath_atx_tid *tid)
795{
Sujithd43f30152009-01-16 21:38:53 +0530796 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +0530797 enum ATH_AGGR_STATUS status;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100798 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530799 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100800 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +0530801
802 do {
803 if (list_empty(&tid->buf_q))
804 return;
805
806 INIT_LIST_HEAD(&bf_q);
807
Felix Fietkau269c44b2010-11-14 15:20:06 +0100808 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530809
810 /*
Sujithd43f30152009-01-16 21:38:53 +0530811 * no frames picked up to be aggregated;
812 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +0530813 */
814 if (list_empty(&bf_q))
815 break;
816
817 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530818 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +0530819
Felix Fietkau55195412011-04-17 23:28:09 +0200820 if (tid->ac->clear_ps_filter) {
821 tid->ac->clear_ps_filter = false;
822 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
823 }
824
Sujithd43f30152009-01-16 21:38:53 +0530825 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +0100826 if (bf == bf->bf_lastbf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100827 fi = get_frame_info(bf->bf_mpdu);
828
Sujithe8324352009-01-16 21:38:42 +0530829 bf->bf_state.bf_type &= ~BUF_AGGR;
Sujithd43f30152009-01-16 21:38:53 +0530830 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100831 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +0200832 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Sujithe8324352009-01-16 21:38:42 +0530833 continue;
834 }
835
Sujithd43f30152009-01-16 21:38:53 +0530836 /* setup first desc of aggregate */
Sujithe8324352009-01-16 21:38:42 +0530837 bf->bf_state.bf_type |= BUF_AGGR;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100838 ath_buf_set_rate(sc, bf, aggr_len);
839 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530840
Sujithd43f30152009-01-16 21:38:53 +0530841 /* anchor last desc of aggregate */
842 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530843
Felix Fietkaufce041b2011-05-19 12:20:25 +0200844 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Sujithfec247c2009-07-27 12:08:16 +0530845 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +0530846
Felix Fietkau4b3ba662010-12-17 00:57:00 +0100847 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +0530848 status != ATH_AGGR_BAW_CLOSED);
849}
850
Felix Fietkau231c3a12010-09-20 19:35:28 +0200851int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
852 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530853{
854 struct ath_atx_tid *txtid;
855 struct ath_node *an;
856
857 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530858 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200859
860 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
861 return -EAGAIN;
862
Sujithf83da962009-07-23 15:32:37 +0530863 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200864 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -0700865 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200866
Felix Fietkau2ed72222011-01-10 17:05:49 -0700867 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
868 txtid->baw_head = txtid->baw_tail = 0;
869
Felix Fietkau231c3a12010-09-20 19:35:28 +0200870 return 0;
Sujithe8324352009-01-16 21:38:42 +0530871}
872
Sujithf83da962009-07-23 15:32:37 +0530873void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +0530874{
875 struct ath_node *an = (struct ath_node *)sta->drv_priv;
876 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +0100877 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530878
879 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +0530880 return;
Sujithe8324352009-01-16 21:38:42 +0530881
882 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +0530883 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +0530884 return;
Sujithe8324352009-01-16 21:38:42 +0530885 }
886
Sujithe8324352009-01-16 21:38:42 +0530887 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200888 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200889
890 /*
891 * If frames are still being transmitted for this TID, they will be
892 * cleaned up during tx completion. To prevent race conditions, this
893 * TID can only be reused after all in-progress subframes have been
894 * completed.
895 */
896 if (txtid->baw_head != txtid->baw_tail)
897 txtid->state |= AGGR_CLEANUP;
898 else
899 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +0530900 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530901
Felix Fietkau90fa5392010-09-20 13:45:38 +0200902 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +0530903}
904
Felix Fietkau55195412011-04-17 23:28:09 +0200905bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
906{
907 struct ath_atx_tid *tid;
908 struct ath_atx_ac *ac;
909 struct ath_txq *txq;
910 bool buffered = false;
911 int tidno;
912
913 for (tidno = 0, tid = &an->tid[tidno];
914 tidno < WME_NUM_TID; tidno++, tid++) {
915
916 if (!tid->sched)
917 continue;
918
919 ac = tid->ac;
920 txq = ac->txq;
921
922 spin_lock_bh(&txq->axq_lock);
923
924 if (!list_empty(&tid->buf_q))
925 buffered = true;
926
927 tid->sched = false;
928 list_del(&tid->list);
929
930 if (ac->sched) {
931 ac->sched = false;
932 list_del(&ac->list);
933 }
934
935 spin_unlock_bh(&txq->axq_lock);
936 }
937
938 return buffered;
939}
940
941void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
942{
943 struct ath_atx_tid *tid;
944 struct ath_atx_ac *ac;
945 struct ath_txq *txq;
946 int tidno;
947
948 for (tidno = 0, tid = &an->tid[tidno];
949 tidno < WME_NUM_TID; tidno++, tid++) {
950
951 ac = tid->ac;
952 txq = ac->txq;
953
954 spin_lock_bh(&txq->axq_lock);
955 ac->clear_ps_filter = true;
956
957 if (!list_empty(&tid->buf_q) && !tid->paused) {
958 ath_tx_queue_tid(txq, tid);
959 ath_txq_schedule(sc, txq);
960 }
961
962 spin_unlock_bh(&txq->axq_lock);
963 }
964}
965
Sujithe8324352009-01-16 21:38:42 +0530966void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
967{
968 struct ath_atx_tid *txtid;
969 struct ath_node *an;
970
971 an = (struct ath_node *)sta->drv_priv;
972
973 if (sc->sc_flags & SC_OP_TXAGGR) {
974 txtid = ATH_AN_2_TID(an, tid);
975 txtid->baw_size =
976 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
977 txtid->state |= AGGR_ADDBA_COMPLETE;
978 txtid->state &= ~AGGR_ADDBA_PROGRESS;
979 ath_tx_resume_tid(sc, txtid);
980 }
981}
982
Sujithe8324352009-01-16 21:38:42 +0530983/********************/
984/* Queue Management */
985/********************/
986
Sujithe8324352009-01-16 21:38:42 +0530987static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
988 struct ath_txq *txq)
989{
990 struct ath_atx_ac *ac, *ac_tmp;
991 struct ath_atx_tid *tid, *tid_tmp;
992
993 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
994 list_del(&ac->list);
995 ac->sched = false;
996 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
997 list_del(&tid->list);
998 tid->sched = false;
999 ath_tid_drain(sc, txq, tid);
1000 }
1001 }
1002}
1003
1004struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1005{
Sujithcbe61d82009-02-09 13:27:12 +05301006 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001007 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301008 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001009 static const int subtype_txq_to_hwq[] = {
1010 [WME_AC_BE] = ATH_TXQ_AC_BE,
1011 [WME_AC_BK] = ATH_TXQ_AC_BK,
1012 [WME_AC_VI] = ATH_TXQ_AC_VI,
1013 [WME_AC_VO] = ATH_TXQ_AC_VO,
1014 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001015 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301016
1017 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001018 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301019 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1020 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1021 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1022 qi.tqi_physCompBuf = 0;
1023
1024 /*
1025 * Enable interrupts only for EOL and DESC conditions.
1026 * We mark tx descriptors to receive a DESC interrupt
1027 * when a tx queue gets deep; otherwise waiting for the
1028 * EOL to reap descriptors. Note that this is done to
1029 * reduce interrupt load and this only defers reaping
1030 * descriptors, never transmitting frames. Aside from
1031 * reducing interrupts this also permits more concurrency.
1032 * The only potential downside is if the tx queue backs
1033 * up in which case the top half of the kernel may backup
1034 * due to a lack of tx descriptors.
1035 *
1036 * The UAPSD queue is an exception, since we take a desc-
1037 * based intr on the EOSP frames.
1038 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001039 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1040 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1041 TXQ_FLAG_TXERRINT_ENABLE;
1042 } else {
1043 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1044 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1045 else
1046 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1047 TXQ_FLAG_TXDESCINT_ENABLE;
1048 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001049 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1050 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301051 /*
1052 * NB: don't print a message, this happens
1053 * normally on parts with too few tx queues
1054 */
1055 return NULL;
1056 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001057 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001058 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001059 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1060 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301061 return NULL;
1062 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001063 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1064 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301065
Ben Greear60f2d1d2011-01-09 23:11:52 -08001066 txq->axq_qnum = axq_qnum;
1067 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301068 txq->axq_link = NULL;
1069 INIT_LIST_HEAD(&txq->axq_q);
1070 INIT_LIST_HEAD(&txq->axq_acq);
1071 spin_lock_init(&txq->axq_lock);
1072 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001073 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001074 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001075 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001076
1077 txq->txq_headidx = txq->txq_tailidx = 0;
1078 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1079 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301080 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001081 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301082}
1083
Sujithe8324352009-01-16 21:38:42 +05301084int ath_txq_update(struct ath_softc *sc, int qnum,
1085 struct ath9k_tx_queue_info *qinfo)
1086{
Sujithcbe61d82009-02-09 13:27:12 +05301087 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301088 int error = 0;
1089 struct ath9k_tx_queue_info qi;
1090
1091 if (qnum == sc->beacon.beaconq) {
1092 /*
1093 * XXX: for beacon queue, we just save the parameter.
1094 * It will be picked up by ath_beaconq_config when
1095 * it's necessary.
1096 */
1097 sc->beacon.beacon_qi = *qinfo;
1098 return 0;
1099 }
1100
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001101 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301102
1103 ath9k_hw_get_txq_props(ah, qnum, &qi);
1104 qi.tqi_aifs = qinfo->tqi_aifs;
1105 qi.tqi_cwmin = qinfo->tqi_cwmin;
1106 qi.tqi_cwmax = qinfo->tqi_cwmax;
1107 qi.tqi_burstTime = qinfo->tqi_burstTime;
1108 qi.tqi_readyTime = qinfo->tqi_readyTime;
1109
1110 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001111 ath_err(ath9k_hw_common(sc->sc_ah),
1112 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301113 error = -EIO;
1114 } else {
1115 ath9k_hw_resettxqueue(ah, qnum);
1116 }
1117
1118 return error;
1119}
1120
1121int ath_cabq_update(struct ath_softc *sc)
1122{
1123 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001124 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301125 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301126
1127 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1128 /*
1129 * Ensure the readytime % is within the bounds.
1130 */
Sujith17d79042009-02-09 13:27:03 +05301131 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1132 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1133 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1134 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301135
Steve Brown9814f6b2011-02-07 17:10:39 -07001136 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301137 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301138 ath_txq_update(sc, qnum, &qi);
1139
1140 return 0;
1141}
1142
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001143static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1144{
1145 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1146 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1147}
1148
Felix Fietkaufce041b2011-05-19 12:20:25 +02001149static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1150 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301151 __releases(txq->axq_lock)
1152 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301153{
1154 struct ath_buf *bf, *lastbf;
1155 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001156 struct ath_tx_status ts;
1157
1158 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301159 INIT_LIST_HEAD(&bf_head);
1160
Felix Fietkaufce041b2011-05-19 12:20:25 +02001161 while (!list_empty(list)) {
1162 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301163
Felix Fietkaufce041b2011-05-19 12:20:25 +02001164 if (bf->bf_stale) {
1165 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301166
Felix Fietkaufce041b2011-05-19 12:20:25 +02001167 ath_tx_return_buffer(sc, bf);
1168 continue;
Sujithe8324352009-01-16 21:38:42 +05301169 }
1170
1171 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001172 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001173
Sujithe8324352009-01-16 21:38:42 +05301174 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001175 if (bf_is_ampdu_not_probing(bf))
1176 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301177
Felix Fietkaufce041b2011-05-19 12:20:25 +02001178 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301179 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001180 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1181 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301182 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001183 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001184 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001185 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001186}
1187
1188/*
1189 * Drain a given TX queue (could be Beacon or Data)
1190 *
1191 * This assumes output has been stopped and
1192 * we do not need to block ath_tx_tasklet.
1193 */
1194void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1195{
1196 spin_lock_bh(&txq->axq_lock);
1197 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1198 int idx = txq->txq_tailidx;
1199
1200 while (!list_empty(&txq->txq_fifo[idx])) {
1201 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1202 retry_tx);
1203
1204 INCR(idx, ATH_TXFIFO_DEPTH);
1205 }
1206 txq->txq_tailidx = idx;
1207 }
1208
1209 txq->axq_link = NULL;
1210 txq->axq_tx_inprogress = false;
1211 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001212
1213 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001214 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1215 ath_txq_drain_pending_buffers(sc, txq);
1216
1217 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301218}
1219
Felix Fietkau080e1a22010-12-05 20:17:53 +01001220bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301221{
Sujithcbe61d82009-02-09 13:27:12 +05301222 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001223 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301224 struct ath_txq *txq;
1225 int i, npend = 0;
1226
1227 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001228 return true;
Sujith043a0402009-01-16 21:38:47 +05301229
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001230 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301231
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001232 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301233 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001234 if (!ATH_TXQ_SETUP(sc, i))
1235 continue;
1236
1237 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301238 }
1239
Felix Fietkau080e1a22010-12-05 20:17:53 +01001240 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001241 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301242
1243 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001244 if (!ATH_TXQ_SETUP(sc, i))
1245 continue;
1246
1247 /*
1248 * The caller will resume queues with ieee80211_wake_queues.
1249 * Mark the queue as not stopped to prevent ath_tx_complete
1250 * from waking the queue too early.
1251 */
1252 txq = &sc->tx.txq[i];
1253 txq->stopped = false;
1254 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301255 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001256
1257 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301258}
1259
Sujithe8324352009-01-16 21:38:42 +05301260void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1261{
1262 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1263 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1264}
1265
Ben Greear7755bad2011-01-18 17:30:00 -08001266/* For each axq_acq entry, for each tid, try to schedule packets
1267 * for transmit until ampdu_depth has reached min Q depth.
1268 */
Sujithe8324352009-01-16 21:38:42 +05301269void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1270{
Ben Greear7755bad2011-01-18 17:30:00 -08001271 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1272 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301273
Felix Fietkau21f28e62011-01-15 14:30:14 +01001274 if (list_empty(&txq->axq_acq) ||
1275 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301276 return;
1277
1278 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001279 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301280
Ben Greear7755bad2011-01-18 17:30:00 -08001281 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1282 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1283 list_del(&ac->list);
1284 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301285
Ben Greear7755bad2011-01-18 17:30:00 -08001286 while (!list_empty(&ac->tid_q)) {
1287 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1288 list);
1289 list_del(&tid->list);
1290 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301291
Ben Greear7755bad2011-01-18 17:30:00 -08001292 if (tid->paused)
1293 continue;
Sujithe8324352009-01-16 21:38:42 +05301294
Ben Greear7755bad2011-01-18 17:30:00 -08001295 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301296
Ben Greear7755bad2011-01-18 17:30:00 -08001297 /*
1298 * add tid to round-robin queue if more frames
1299 * are pending for the tid
1300 */
1301 if (!list_empty(&tid->buf_q))
1302 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301303
Ben Greear7755bad2011-01-18 17:30:00 -08001304 if (tid == last_tid ||
1305 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1306 break;
Sujithe8324352009-01-16 21:38:42 +05301307 }
Ben Greear7755bad2011-01-18 17:30:00 -08001308
1309 if (!list_empty(&ac->tid_q)) {
1310 if (!ac->sched) {
1311 ac->sched = true;
1312 list_add_tail(&ac->list, &txq->axq_acq);
1313 }
1314 }
1315
1316 if (ac == last_ac ||
1317 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1318 return;
Sujithe8324352009-01-16 21:38:42 +05301319 }
1320}
1321
Sujithe8324352009-01-16 21:38:42 +05301322/***********/
1323/* TX, DMA */
1324/***********/
1325
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001326/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001327 * Insert a chain of ath_buf (descriptors) on a txq;
 1328 * the caller must have already chained the descriptors together.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001329 */
Sujith102e0572008-10-29 10:15:16 +05301330static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001331 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001332{
Sujithcbe61d82009-02-09 13:27:12 +05301333 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001334 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001335 struct ath_buf *bf, *bf_last;
1336 bool puttxbuf = false;
1337 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301338
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001339 /*
1340 * Insert the frame on the outbound list and
1341 * pass it on to the hardware.
1342 */
1343
1344 if (list_empty(head))
1345 return;
1346
Felix Fietkaufce041b2011-05-19 12:20:25 +02001347 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001348 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001349 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001350
Joe Perches226afe62010-12-02 19:12:37 -08001351 ath_dbg(common, ATH_DBG_QUEUE,
1352 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001353
Felix Fietkaufce041b2011-05-19 12:20:25 +02001354 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1355 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001356 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001357 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001358 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001359 list_splice_tail_init(head, &txq->axq_q);
1360
Felix Fietkaufce041b2011-05-19 12:20:25 +02001361 if (txq->axq_link) {
1362 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001363 ath_dbg(common, ATH_DBG_XMIT,
1364 "link[%u] (%p)=%llx (%p)\n",
1365 txq->axq_qnum, txq->axq_link,
1366 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001367 } else if (!edma)
1368 puttxbuf = true;
1369
1370 txq->axq_link = bf_last->bf_desc;
1371 }
1372
1373 if (puttxbuf) {
1374 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1375 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1376 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1377 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1378 }
1379
1380 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001381 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001382 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001383 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001384
1385 if (!internal) {
1386 txq->axq_depth++;
1387 if (bf_is_ampdu_not_probing(bf))
1388 txq->axq_ampdu_depth++;
1389 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001390}
1391
Sujithe8324352009-01-16 21:38:42 +05301392static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001393 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301394{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001395 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001396 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301397
Sujithe8324352009-01-16 21:38:42 +05301398 bf->bf_state.bf_type |= BUF_AMPDU;
1399
1400 /*
1401 * Do not queue to h/w when any of the following conditions is true:
1402 * - there are pending frames in software queue
1403 * - the TID is currently paused for ADDBA/BAR request
1404 * - seqno is not within block-ack window
1405 * - h/w queue depth exceeds low water mark
1406 */
1407 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001408 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001409 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001410 /*
Sujithe8324352009-01-16 21:38:42 +05301411 * Add this frame to software queue for scheduling later
1412 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001413 */
Ben Greearbda8add2011-01-09 23:11:48 -08001414 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001415 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301416 ath_tx_queue_tid(txctl->txq, tid);
1417 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001418 }
1419
Felix Fietkau04caf862010-11-14 15:20:12 +01001420 INIT_LIST_HEAD(&bf_head);
1421 list_add(&bf->list, &bf_head);
1422
Sujithe8324352009-01-16 21:38:42 +05301423 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001424 if (!fi->retries)
1425 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301426
1427 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001428 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301429 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001430 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301432}
1433
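/* Send a single frame straight to hardware, bypassing the per-TID software
 * queue; if a TID is given, the starting sequence number used for a
 * subsequent ADDBA request is advanced here.
 */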
Felix Fietkau82b873a2010-11-11 03:18:37 +01001434static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1435 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001436 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001437{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301439 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001440
Sujithe8324352009-01-16 21:38:42 +05301441 bf = list_first_entry(bf_head, struct ath_buf, list);
1442 bf->bf_state.bf_type &= ~BUF_AMPDU;
1443
1444 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001445 if (tid)
1446 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301447
Sujithd43f30152009-01-16 21:38:53 +05301448 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001449 fi = get_frame_info(bf->bf_mpdu);
1450 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001451 ath_tx_txqaddbuf(sc, txq, bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301452 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001453}
1454
Sujith528f0c62008-10-29 10:14:26 +05301455static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001456{
Sujith528f0c62008-10-29 10:14:26 +05301457 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001458 enum ath9k_pkt_type htype;
1459 __le16 fc;
1460
Sujith528f0c62008-10-29 10:14:26 +05301461 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001462 fc = hdr->frame_control;
1463
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464 if (ieee80211_is_beacon(fc))
1465 htype = ATH9K_PKT_TYPE_BEACON;
1466 else if (ieee80211_is_probe_resp(fc))
1467 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1468 else if (ieee80211_is_atim(fc))
1469 htype = ATH9K_PKT_TYPE_ATIM;
1470 else if (ieee80211_is_pspoll(fc))
1471 htype = ATH9K_PKT_TYPE_PSPOLL;
1472 else
1473 htype = ATH9K_PKT_TYPE_NORMAL;
1474
1475 return htype;
1476}
1477
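/* Cache per-frame transmit state (key index, key type, frame length and
 * sequence number) in ath_frame_info; the mac80211 tx control pointers it
 * overwrites become invalid later in the tx path.
 */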
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001478static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1479 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301480{
Felix Fietkau9ac58612011-01-24 19:23:18 +01001481 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301482 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001483 struct ieee80211_sta *sta = tx_info->control.sta;
1484 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301485 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001486 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001487 struct ath_node *an = NULL;
Sujith528f0c62008-10-29 10:14:26 +05301488 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001489 enum ath9k_key_type keytype;
1490 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001491 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301492
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001493 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301494
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001495 if (sta)
1496 an = (struct ath_node *) sta->drv_priv;
1497
Sujith528f0c62008-10-29 10:14:26 +05301498 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001499 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001500 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001502 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1503
1504 /*
1505 * Override seqno set by upper layer with the one
1506 * in tx aggregation state.
1507 */
1508 tid = ATH_AN_2_TID(an, tidno);
1509 seqno = tid->seq_next;
1510 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1511 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1512 }
1513
1514 memset(fi, 0, sizeof(*fi));
1515 if (hw_key)
1516 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001517 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1518 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001519 else
1520 fi->keyix = ATH9K_TXKEYIX_INVALID;
1521 fi->keytype = keytype;
1522 fi->framelen = framelen;
1523 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301524}
1525
Felix Fietkau82b873a2010-11-11 03:18:37 +01001526static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301527{
1528 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1529 int flags = 0;
1530
Sujith528f0c62008-10-29 10:14:26 +05301531 flags |= ATH9K_TXDESC_INTREQ;
1532
1533 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1534 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301535
Felix Fietkau82b873a2010-11-11 03:18:37 +01001536 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001537 flags |= ATH9K_TXDESC_LDPC;
1538
Sujith528f0c62008-10-29 10:14:26 +05301539 return flags;
1540}
1541
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001542/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001543 * rix - rate index
1544 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1545 * width - 0 for 20 MHz, 1 for 40 MHz
1546 * half_gi - if set, use the 3.6 us (short GI) symbol time instead of 4 us
1547 */
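/* Illustrative example (numbers chosen for this comment only): a 1500-byte
 * MPDU at MCS 7 (one stream, 260 bits per 20 MHz symbol), long GI:
 * nbits = 1500 * 8 + 22 = 12022, nsymbols = DIV_ROUND_UP(12022, 260) = 47,
 * duration = 47 * 4 us + 36 us of training/signal fields = 224 us.
 */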
Felix Fietkau269c44b2010-11-14 15:20:06 +01001548static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301549 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001551 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001552 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301553
1554 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001555 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001557 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001558 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1559
1560 if (!half_gi)
1561 duration = SYMBOL_TIME(nsymbols);
1562 else
1563 duration = SYMBOL_TIME_HALFGI(nsymbols);
1564
Sujithe63835b2008-11-18 09:07:53 +05301565 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001566 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301567
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568 return duration;
1569}
1570
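/* With APM enabled on a 5 GHz channel, reduce a three-chain mask (0x7) to
 * two chains (0x3) for all rates below MCS16, i.e. legacy and one/two
 * stream HT rates; three-stream rates keep the full chainmask.
 */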
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301571u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1572{
1573 struct ath_hw *ah = sc->sc_ah;
1574 struct ath9k_channel *curchan = ah->curchan;
1575 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1576 (curchan->channelFlags & CHANNEL_5GHZ) &&
1577 (chainmask == 0x7) && (rate < 0x90))
1578 return 0x3;
1579 else
1580 return chainmask;
1581}
1582
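/* Build the four-entry hardware rate series from the mac80211 rate table:
 * RTS/CTS protection flags, channel width, guard interval, chainmask and
 * per-rate packet duration are derived here and written to the descriptor.
 */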
Felix Fietkau269c44b2010-11-14 15:20:06 +01001583static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001585 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301587 struct sk_buff *skb;
1588 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301589 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001590 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301591 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301592 int i, flags = 0;
1593 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301594 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301595
1596 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301597
Sujitha22be222009-03-30 15:28:36 +05301598 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301599 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301600 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301601 hdr = (struct ieee80211_hdr *)skb->data;
1602 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301603
Sujithc89424d2009-01-30 14:29:28 +05301604 /*
1605 * Whether Short Preamble is needed for the CTS rate is determined
1606 * by the BSS's global flag.
1607 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1608 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001609 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1610 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301611 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001612 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001613
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001614 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001615 bool is_40, is_sgi, is_sp;
1616 int phy;
1617
Sujithe63835b2008-11-18 09:07:53 +05301618 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001619 continue;
1620
Sujitha8efee42008-11-18 09:07:30 +05301621 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301622 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001623
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301624 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301625 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001626 flags |= ATH9K_TXDESC_RTSENA;
1627 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1628 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1629 flags |= ATH9K_TXDESC_CTSENA;
1630 }
1631
Sujithc89424d2009-01-30 14:29:28 +05301632 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1633 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1634 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1635 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636
Felix Fietkau545750d2009-11-23 22:21:01 +01001637 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1638 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1639 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1640
1641 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1642 /* MCS rates */
1643 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301644 series[i].ChSel = ath_txchainmask_reduction(sc,
1645 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001646 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001647 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001648 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1649 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001650 continue;
1651 }
1652
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301653 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001654 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1655 !(rate->flags & IEEE80211_RATE_ERP_G))
1656 phy = WLAN_RC_PHY_CCK;
1657 else
1658 phy = WLAN_RC_PHY_OFDM;
1659
1660 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1661 series[i].Rate = rate->hw_value;
1662 if (rate->hw_value_short) {
1663 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1664 series[i].Rate |= rate->hw_value_short;
1665 } else {
1666 is_sp = false;
1667 }
1668
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301669 if (bf->bf_state.bfs_paprd)
1670 series[i].ChSel = common->tx_chainmask;
1671 else
1672 series[i].ChSel = ath_txchainmask_reduction(sc,
1673 common->tx_chainmask, series[i].Rate);
1674
Felix Fietkau545750d2009-11-23 22:21:01 +01001675 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001676 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001677 }
1678
Felix Fietkau27032052010-01-17 21:08:50 +01001679 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001680 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001681 flags &= ~ATH9K_TXDESC_RTSENA;
1682
1683 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1684 if (flags & ATH9K_TXDESC_RTSENA)
1685 flags &= ~ATH9K_TXDESC_CTSENA;
1686
Sujithe63835b2008-11-18 09:07:53 +05301687 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301688 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1689 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301690 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301691 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301692
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693}
1694
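/* Allocate an ath_buf, DMA-map the skb and fill in the first descriptor.
 * Returns NULL if no tx buffers are available or the DMA mapping fails.
 */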
Felix Fietkau82b873a2010-11-11 03:18:37 +01001695static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001696 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001697 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301698{
Felix Fietkau9ac58612011-01-24 19:23:18 +01001699 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001700 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001701 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001702 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001703 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001704 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001705 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001706
1707 bf = ath_tx_get_buffer(sc);
1708 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001709 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001710 return NULL;
1711 }
Sujithe8324352009-01-16 21:38:42 +05301712
Sujithe8324352009-01-16 21:38:42 +05301713 ATH_TXBUF_RESET(bf);
1714
Felix Fietkau82b873a2010-11-11 03:18:37 +01001715 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301716 bf->bf_mpdu = skb;
1717
Ben Greearc1739eb2010-10-14 12:45:29 -07001718 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1719 skb->len, DMA_TO_DEVICE);
1720 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301721 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001722 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001723 ath_err(ath9k_hw_common(sc->sc_ah),
1724 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001725 ath_tx_return_buffer(sc, bf);
1726 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301727 }
1728
Sujithe8324352009-01-16 21:38:42 +05301729 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301730
1731 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001732 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301733
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001734 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1735 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301736
1737 ath9k_hw_filltxdesc(ah, ds,
1738 skb->len, /* segment length */
1739 true, /* first segment */
1740 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001741 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001742 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001743 txq->axq_qnum);
1744
1745
1746 return bf;
1747}
1748
1749/* FIXME: tx power */
1750static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1751 struct ath_tx_control *txctl)
1752{
1753 struct sk_buff *skb = bf->bf_mpdu;
1754 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001756 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001757 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001758 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301759
Sujithe8324352009-01-16 21:38:42 +05301760 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301761 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1762 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001763 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1764 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001765 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001766
Felix Fietkau066dae92010-11-07 14:59:39 +01001767 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001768 }
1769
1770 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001771 /*
1772 * Try aggregation if it's a unicast data frame
1773 * and the destination is HT capable.
1774 */
1775 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301776 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001777 INIT_LIST_HEAD(&bf_head);
1778 list_add_tail(&bf->list, &bf_head);
1779
Felix Fietkau61117f02010-11-11 03:18:36 +01001780 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001781 bf->bf_state.bfs_paprd = txctl->paprd;
1782
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001783 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001784 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1785 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001786
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301787 if (txctl->paprd)
1788 bf->bf_state.bfs_paprd_timestamp = jiffies;
1789
Felix Fietkau55195412011-04-17 23:28:09 +02001790 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1791 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1792
Felix Fietkau248a38d2010-12-10 21:16:46 +01001793 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301794 }
1795
1796 spin_unlock_bh(&txctl->txq->axq_lock);
1797}
1798
1799/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001800int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301801 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001802{
Felix Fietkau28d16702010-11-14 15:20:10 +01001803 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1804 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001805 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001806 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001807 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001808 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001809 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001810 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001811 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001812 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001813
Ben Greeara9927ba2010-12-06 21:13:49 -08001814 /* NOTE: sta can be NULL according to net/mac80211.h */
1815 if (sta)
1816 txctl->an = (struct ath_node *)sta->drv_priv;
1817
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 if (info->control.hw_key)
1819 frmlen += info->control.hw_key->icv_len;
1820
Felix Fietkau28d16702010-11-14 15:20:10 +01001821 /*
1822 * As a temporary workaround, assign seq# here; this will likely need
1823 * to be cleaned up to work better with Beacon transmission and virtual
1824 * BSSes.
1825 */
1826 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1827 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1828 sc->tx.seq_no += 0x10;
1829 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1830 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1831 }
1832
1833 /* Add the padding after the header if this is not already done */
1834 padpos = ath9k_cmn_padpos(hdr->frame_control);
1835 padsize = padpos & 3;
1836 if (padsize && skb->len > padpos) {
1837 if (skb_headroom(skb) < padsize)
1838 return -ENOMEM;
1839
1840 skb_push(skb, padsize);
1841 memmove(skb->data, skb->data + padsize, padpos);
1842 }
1843
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001844 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1845 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1846 !ieee80211_is_data(hdr->frame_control))
1847 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1848
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001849 setup_frame_info(hw, skb, frmlen);
1850
1851 /*
1852 * At this point, the vif, hw_key and sta pointers in the tx control
1853 * info are no longer valid (overwritten by the ath_frame_info data).
1854 */
1855
1856 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001857 if (unlikely(!bf))
1858 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001859
Felix Fietkau066dae92010-11-07 14:59:39 +01001860 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001861 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001862 if (txq == sc->tx.txq_map[q] &&
1863 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001864 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001865 txq->stopped = 1;
1866 }
1867 spin_unlock_bh(&txq->axq_lock);
1868
Sujithe8324352009-01-16 21:38:42 +05301869 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001870
1871 return 0;
1872}
1873
Sujithe8324352009-01-16 21:38:42 +05301874/*****************/
1875/* TX Completion */
1876/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001877
Sujithe8324352009-01-16 21:38:42 +05301878static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001879 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001880{
Sujithe8324352009-01-16 21:38:42 +05301881 struct ieee80211_hw *hw = sc->hw;
1882 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001883 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001884 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001885 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301886
Joe Perches226afe62010-12-02 19:12:37 -08001887 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301888
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301889 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301890 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301891
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301892 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301893 /* Frame was ACKed */
1894 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1895 }
1896
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001897 padpos = ath9k_cmn_padpos(hdr->frame_control);
1898 padsize = padpos & 3;
1899 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301900 /*
1901 * Remove MAC header padding before giving the frame back to
1902 * mac80211.
1903 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001904 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301905 skb_pull(skb, padsize);
1906 }
1907
Sujith1b04b932010-01-08 10:36:05 +05301908 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1909 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001910 ath_dbg(common, ATH_DBG_PS,
1911 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301912 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1913 PS_WAIT_FOR_CAB |
1914 PS_WAIT_FOR_PSPOLL_DATA |
1915 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001916 }
1917
Felix Fietkau7545daf2011-01-24 19:23:16 +01001918 q = skb_get_queue_mapping(skb);
1919 if (txq == sc->tx.txq_map[q]) {
1920 spin_lock_bh(&txq->axq_lock);
1921 if (WARN_ON(--txq->pending_frames < 0))
1922 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001923
Felix Fietkau7545daf2011-01-24 19:23:16 +01001924 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1925 ieee80211_wake_queue(sc->hw, q);
1926 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001927 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001928 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001929 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001930
1931 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301932}
1933
1934static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001935 struct ath_txq *txq, struct list_head *bf_q,
1936 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301937{
1938 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301939 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301940 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301941
Sujithe8324352009-01-16 21:38:42 +05301942 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301943 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301944
1945 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301946 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301947
1948 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301949 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301950 }
1951
Ben Greearc1739eb2010-10-14 12:45:29 -07001952 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001953 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001954
1955 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301956 if (time_after(jiffies,
1957 bf->bf_state.bfs_paprd_timestamp +
1958 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001959 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001960 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001961 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001962 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001963 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001964 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001965 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001966 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001967 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1968 * accidentally reference it later.
1969 */
1970 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301971
1972 /*
1973 * Return the list of ath_buf of this mpdu to free queue
1974 */
1975 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1976 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1977 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1978}
1979
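/* Translate the hardware tx status into mac80211 rate-control feedback:
 * ACK RSSI, A-MPDU subframe counts and per-rate try counts.
 */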
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001980static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1981 struct ath_tx_status *ts, int nframes, int nbad,
1982 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301983{
Sujitha22be222009-03-30 15:28:36 +05301984 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301985 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301986 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001987 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001988 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301989 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301990
Sujith95e4acb2009-03-13 08:56:09 +05301991 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001992 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301993
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301995 WARN_ON(tx_rateindex >= hw->max_rates);
1996
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001997 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301998 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001999 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002000 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302001
Felix Fietkaub572d032010-11-14 15:20:07 +01002002 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002003
Felix Fietkaub572d032010-11-14 15:20:07 +01002004 tx_info->status.ampdu_len = nframes;
2005 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002006 }
2007
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002008 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302009 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002010 /*
2011 * If an underrun error is seen assume it as an excessive
2012 * retry only if max frame trigger level has been reached
2013 * (2 KB for single stream, and 4 KB for dual stream).
2014 * Adjust the long retry as if the frame was tried
2015 * hw->max_rate_tries times to affect how rate control updates
2016 * PER for the failed rate.
2017 * In case of congestion on the bus, penalizing this type of
2018 * underrun should help the hardware actually transmit new frames
2019 * successfully by eventually preferring slower rates.
2020 * This itself should also alleviate congestion on the bus.
2021 */
2022 if (ieee80211_is_data(hdr->frame_control) &&
2023 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2024 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002025 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002026 tx_info->status.rates[tx_rateindex].count =
2027 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302028 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302029
Felix Fietkau545750d2009-11-23 22:21:01 +01002030 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302031 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002032 tx_info->status.rates[i].idx = -1;
2033 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302034
Felix Fietkau78c46532010-06-25 01:26:16 +02002035 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302036}
2037
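/* Complete one transmit unit (a single frame or an aggregate). The txq lock
 * is dropped while the completion handlers run, as the sparse annotations
 * below indicate.
 */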
Felix Fietkaufce041b2011-05-19 12:20:25 +02002038static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2039 struct ath_tx_status *ts, struct ath_buf *bf,
2040 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302041 __releases(txq->axq_lock)
2042 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002043{
2044 int txok;
2045
2046 txq->axq_depth--;
2047 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2048 txq->axq_tx_inprogress = false;
2049 if (bf_is_ampdu_not_probing(bf))
2050 txq->axq_ampdu_depth--;
2051
2052 spin_unlock_bh(&txq->axq_lock);
2053
2054 if (!bf_isampdu(bf)) {
2055 /*
2056 * This frame is sent out as a single frame.
2057 * Use hardware retry status for this frame.
2058 */
2059 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2060 bf->bf_state.bf_type |= BUF_XRETRY;
2061 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2062 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2063 } else
2064 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2065
2066 spin_lock_bh(&txq->axq_lock);
2067
2068 if (sc->sc_flags & SC_OP_TXAGGR)
2069 ath_txq_schedule(sc, txq);
2070}
2071
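/* Reap completed descriptors from a legacy (non-EDMA) tx queue, keeping the
 * last completed descriptor behind as the holding descriptor required by
 * the hardware.
 */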
Sujithc4288392008-11-18 09:09:30 +05302072static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002073{
Sujithcbe61d82009-02-09 13:27:12 +05302074 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002075 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2077 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302078 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002079 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080 int status;
2081
Joe Perches226afe62010-12-02 19:12:37 -08002082 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2083 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2084 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085
Felix Fietkaufce041b2011-05-19 12:20:25 +02002086 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088 if (list_empty(&txq->axq_q)) {
2089 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002090 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002091 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 break;
2093 }
2094 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2095
2096 /*
2097 * There is a race condition in which a BH gets scheduled
2098 * after software writes TxE and before the hardware re-loads the last
2099 * descriptor to pick up the newly chained one.
2100 * Software must keep the last DONE descriptor as a
2101 * holding descriptor - software does so by marking
2102 * it with the STALE flag.
2103 */
2104 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302105 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002107 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002108 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002109
2110 bf = list_entry(bf_held->list.next, struct ath_buf,
2111 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 }
2113
2114 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302115 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116
Felix Fietkau29bffa92010-03-29 20:14:23 -07002117 memset(&ts, 0, sizeof(ts));
2118 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002119 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002121
Ben Greear2dac4fb2011-01-09 23:11:45 -08002122 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123
2124 /*
2125 * Remove ath_buf's of the same transmit unit from txq,
2126 * however leave the last descriptor back as the holding
2127 * descriptor for hw.
2128 */
Sujitha119cc42009-03-30 15:28:38 +05302129 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002130 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 if (!list_is_singular(&lastbf->list))
2132 list_cut_position(&bf_head,
2133 &txq->axq_q, lastbf->list.prev);
2134
Felix Fietkaufce041b2011-05-19 12:20:25 +02002135 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002136 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002137 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 }
Johannes Berge6a98542008-10-21 12:40:02 +02002139
Felix Fietkaufce041b2011-05-19 12:20:25 +02002140 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002142 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143}
2144
Sujith305fe472009-07-23 15:32:29 +05302145static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002146{
2147 struct ath_softc *sc = container_of(work, struct ath_softc,
2148 tx_complete_work.work);
2149 struct ath_txq *txq;
2150 int i;
2151 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002152#ifdef CONFIG_ATH9K_DEBUGFS
2153 sc->tx_complete_poll_work_seen++;
2154#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002155
2156 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2157 if (ATH_TXQ_SETUP(sc, i)) {
2158 txq = &sc->tx.txq[i];
2159 spin_lock_bh(&txq->axq_lock);
2160 if (txq->axq_depth) {
2161 if (txq->axq_tx_inprogress) {
2162 needreset = true;
2163 spin_unlock_bh(&txq->axq_lock);
2164 break;
2165 } else {
2166 txq->axq_tx_inprogress = true;
2167 }
2168 }
2169 spin_unlock_bh(&txq->axq_lock);
2170 }
2171
2172 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002173 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2174 "tx hung, resetting the chip\n");
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302175 spin_lock_bh(&sc->sc_pcu_lock);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002176 ath_reset(sc, true);
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302177 spin_unlock_bh(&sc->sc_pcu_lock);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002178 }
2179
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002180 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002181 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2182}
2183
2184
Sujithe8324352009-01-16 21:38:42 +05302185
2186void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002187{
Sujithe8324352009-01-16 21:38:42 +05302188 int i;
2189 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002190
Sujithe8324352009-01-16 21:38:42 +05302191 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192
2193 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302194 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2195 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002196 }
2197}
2198
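/* Completion path for EDMA-capable hardware: tx status is read from the
 * status ring, and ts.qid identifies the queue the completed frame came from.
 */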
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002199void ath_tx_edma_tasklet(struct ath_softc *sc)
2200{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002201 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002202 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2203 struct ath_hw *ah = sc->sc_ah;
2204 struct ath_txq *txq;
2205 struct ath_buf *bf, *lastbf;
2206 struct list_head bf_head;
2207 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002208
2209 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002210 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002211 if (status == -EINPROGRESS)
2212 break;
2213 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002214 ath_dbg(common, ATH_DBG_XMIT,
2215 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002216 break;
2217 }
2218
2219 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002220 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002221 continue;
2222
Felix Fietkaufce041b2011-05-19 12:20:25 +02002223 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002224
2225 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002226
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002227 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2228 spin_unlock_bh(&txq->axq_lock);
2229 return;
2230 }
2231
2232 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2233 struct ath_buf, list);
2234 lastbf = bf->bf_lastbf;
2235
2236 INIT_LIST_HEAD(&bf_head);
2237 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2238 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002239
Felix Fietkaufce041b2011-05-19 12:20:25 +02002240 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2241 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242
Felix Fietkaufce041b2011-05-19 12:20:25 +02002243 if (!list_empty(&txq->axq_q)) {
2244 struct list_head bf_q;
2245
2246 INIT_LIST_HEAD(&bf_q);
2247 txq->axq_link = NULL;
2248 list_splice_tail_init(&txq->axq_q, &bf_q);
2249 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2250 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 }
2252
Felix Fietkaufce041b2011-05-19 12:20:25 +02002253 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 spin_unlock_bh(&txq->axq_lock);
2255 }
2256}
2257
Sujithe8324352009-01-16 21:38:42 +05302258/*****************/
2259/* Init, Cleanup */
2260/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002261
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002262static int ath_txstatus_setup(struct ath_softc *sc, int size)
2263{
2264 struct ath_descdma *dd = &sc->txsdma;
2265 u8 txs_len = sc->sc_ah->caps.txs_len;
2266
2267 dd->dd_desc_len = size * txs_len;
2268 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2269 &dd->dd_desc_paddr, GFP_KERNEL);
2270 if (!dd->dd_desc)
2271 return -ENOMEM;
2272
2273 return 0;
2274}
2275
2276static int ath_tx_edma_init(struct ath_softc *sc)
2277{
2278 int err;
2279
2280 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2281 if (!err)
2282 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2283 sc->txsdma.dd_desc_paddr,
2284 ATH_TXSTATUS_RING_SIZE);
2285
2286 return err;
2287}
2288
2289static void ath_tx_edma_cleanup(struct ath_softc *sc)
2290{
2291 struct ath_descdma *dd = &sc->txsdma;
2292
2293 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2294 dd->dd_desc_paddr);
2295}
2296
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002297int ath_tx_init(struct ath_softc *sc, int nbufs)
2298{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002299 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300 int error = 0;
2301
Sujith797fe5c2009-03-30 15:28:45 +05302302 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303
Sujith797fe5c2009-03-30 15:28:45 +05302304 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002305 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302306 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002307 ath_err(common,
2308 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302309 goto err;
2310 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002311
Sujith797fe5c2009-03-30 15:28:45 +05302312 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002313 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302314 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002315 ath_err(common,
2316 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302317 goto err;
2318 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002319
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002320 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2321
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002322 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2323 error = ath_tx_edma_init(sc);
2324 if (error)
2325 goto err;
2326 }
2327
Sujith797fe5c2009-03-30 15:28:45 +05302328err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002329 if (error != 0)
2330 ath_tx_cleanup(sc);
2331
2332 return error;
2333}
2334
Sujith797fe5c2009-03-30 15:28:45 +05302335void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336{
Sujithb77f4832008-12-07 21:44:03 +05302337 if (sc->beacon.bdma.dd_desc_len != 0)
2338 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339
Sujithb77f4832008-12-07 21:44:03 +05302340 if (sc->tx.txdma.dd_desc_len != 0)
2341 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002342
2343 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2344 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345}
2346
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2348{
Sujithc5170162008-10-29 10:13:59 +05302349 struct ath_atx_tid *tid;
2350 struct ath_atx_ac *ac;
2351 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Sujith8ee5afb2008-12-07 21:43:36 +05302353 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302354 tidno < WME_NUM_TID;
2355 tidno++, tid++) {
2356 tid->an = an;
2357 tid->tidno = tidno;
2358 tid->seq_start = tid->seq_next = 0;
2359 tid->baw_size = WME_MAX_BA;
2360 tid->baw_head = tid->baw_tail = 0;
2361 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302362 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302363 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302364 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302365 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302366 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302367 tid->state &= ~AGGR_ADDBA_COMPLETE;
2368 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302369 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002370
Sujith8ee5afb2008-12-07 21:43:36 +05302371 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302372 acno < WME_NUM_AC; acno++, ac++) {
2373 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002374 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302375 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376 }
2377}
2378
Sujithb5aa9bf2008-10-29 10:13:31 +05302379void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380{
Felix Fietkau2b409942010-07-07 19:42:08 +02002381 struct ath_atx_ac *ac;
2382 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002384 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302385
Felix Fietkau2b409942010-07-07 19:42:08 +02002386 for (tidno = 0, tid = &an->tid[tidno];
2387 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388
Felix Fietkau2b409942010-07-07 19:42:08 +02002389 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002390 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391
Felix Fietkau2b409942010-07-07 19:42:08 +02002392 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393
Felix Fietkau2b409942010-07-07 19:42:08 +02002394 if (tid->sched) {
2395 list_del(&tid->list);
2396 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002398
2399 if (ac->sched) {
2400 list_del(&ac->list);
2401 tid->ac->sched = false;
2402 }
2403
2404 ath_tid_drain(sc, txq, tid);
2405 tid->state &= ~AGGR_ADDBA_COMPLETE;
2406 tid->state &= ~AGGR_CLEANUP;
2407
2408 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409 }
2410}