/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

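/*
 * Data bits carried by one OFDM symbol of a single spatial stream for
 * MCS 0-7, indexed as bits_per_symbol[mcs % 8][40 MHz].  Multiply by the
 * stream count (HT_RC_2_STREAMS) to get the per-symbol capacity of a rate.
 */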
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

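/*
 * Largest frame length (bytes) that fits in a 4 ms transmit burst at each
 * MCS.  Rows select 20/40 MHz with normal or short GI; each row covers
 * MCS 0-31 in groups of eight (1-4 spatial streams), capped at 65532.
 */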
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

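/*
 * Per-frame driver state (retry count, key index, frame length, sequence
 * number) lives in the rate_driver_data scratch area of the mac80211
 * tx info, so no separate per-packet allocation is needed.
 */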
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

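/*
 * Mark a subframe as completed and slide the start of the block-ack
 * window (seq_start) past any leading sequence numbers that are no
 * longer outstanding.
 */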
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

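/* Record a newly queued subframe in the block-ack window bitmap. */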
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

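/*
 * Duplicate a buffer's descriptor state.  Used when the final descriptor
 * of an aggregate is still held by the hardware ("stale") but the frame
 * must be re-queued for a software retry.
 */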
static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

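/*
 * Walk a (possibly aggregated) buffer chain and count how many subframes
 * were sent and how many of them are missing from the block-ack bitmap.
 */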
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		if (!bf_last->bf_stale || bf_next != NULL)
			list_move_tail(&bf->list, &bf_head);
		else
			INIT_LIST_HEAD(&bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

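/*
 * Compute the byte limit for an aggregate sent to this TID: the largest
 * size that stays within 4 ms of airtime at the least capable rate in the
 * frame's rate series, further clamped by the peer's advertised maximum
 * A-MPDU length.
 */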
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

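/*
 * Pull frames off a TID's queue and link them into one aggregate, stopping
 * when the block-ack window closes, the rate-derived length limit or the
 * subframe limit is hit, or a rate-control probe frame is encountered.
 */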
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

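/*
 * Called when a station enters powersave: take its TIDs off the transmit
 * schedule and report whether any frames remain buffered for it.
 */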
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!list_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!list_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

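/*
 * Reclaim every buffer on the given descriptor list, completing A-MPDU
 * frames through the aggregate completion path and everything else
 * directly.
 */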
Felix Fietkaufce041b2011-05-19 12:20:25 +02001151static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1152 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301153{
1154 struct ath_buf *bf, *lastbf;
1155 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001156 struct ath_tx_status ts;
1157
1158 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301159 INIT_LIST_HEAD(&bf_head);
1160
Felix Fietkaufce041b2011-05-19 12:20:25 +02001161 while (!list_empty(list)) {
1162 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301163
Felix Fietkaufce041b2011-05-19 12:20:25 +02001164 if (bf->bf_stale) {
1165 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301166
Felix Fietkaufce041b2011-05-19 12:20:25 +02001167 ath_tx_return_buffer(sc, bf);
1168 continue;
Sujithe8324352009-01-16 21:38:42 +05301169 }
1170
1171 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001172 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001173
Sujithe8324352009-01-16 21:38:42 +05301174 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001175 if (bf_is_ampdu_not_probing(bf))
1176 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301177
Felix Fietkaufce041b2011-05-19 12:20:25 +02001178 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301179 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001180 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1181 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301182 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001183 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001184 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001185 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001186}
1187
1188/*
1189 * Drain a given TX queue (could be Beacon or Data)
1190 *
1191 * This assumes output has been stopped and
1192 * we do not need to block ath_tx_tasklet.
1193 */
1194void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1195{
1196 spin_lock_bh(&txq->axq_lock);
1197 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1198 int idx = txq->txq_tailidx;
1199
1200 while (!list_empty(&txq->txq_fifo[idx])) {
1201 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1202 retry_tx);
1203
1204 INCR(idx, ATH_TXFIFO_DEPTH);
1205 }
1206 txq->txq_tailidx = idx;
1207 }
1208
1209 txq->axq_link = NULL;
1210 txq->axq_tx_inprogress = false;
1211 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001212
1213 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001214 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1215 ath_txq_drain_pending_buffers(sc, txq);
1216
1217 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301218}
1219
Felix Fietkau080e1a22010-12-05 20:17:53 +01001220bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301221{
Sujithcbe61d82009-02-09 13:27:12 +05301222 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001223 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301224 struct ath_txq *txq;
1225 int i, npend = 0;
1226
1227 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001228 return true;
Sujith043a0402009-01-16 21:38:47 +05301229
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001230 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301231
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001232 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301233 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001234 if (!ATH_TXQ_SETUP(sc, i))
1235 continue;
1236
1237 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301238 }
1239
Felix Fietkau080e1a22010-12-05 20:17:53 +01001240 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001241 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301242
1243 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001244 if (!ATH_TXQ_SETUP(sc, i))
1245 continue;
1246
1247 /*
1248 * The caller will resume queues with ieee80211_wake_queues.
1249 * Mark the queue as not stopped to prevent ath_tx_complete
1250 * from waking the queue too early.
1251 */
1252 txq = &sc->tx.txq[i];
1253 txq->stopped = false;
1254 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301255 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001256
1257 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301258}
1259
Sujithe8324352009-01-16 21:38:42 +05301260void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1261{
1262 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1263 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1264}
1265
Ben Greear7755bad2011-01-18 17:30:00 -08001266/* For each axq_acq entry, for each tid, try to schedule packets
1267 * for transmit until ampdu_depth has reached min Q depth.
1268 */
Sujithe8324352009-01-16 21:38:42 +05301269void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1270{
Ben Greear7755bad2011-01-18 17:30:00 -08001271 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1272 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301273
Felix Fietkau21f28e62011-01-15 14:30:14 +01001274 if (list_empty(&txq->axq_acq) ||
1275 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301276 return;
1277
1278 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001279 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301280
Ben Greear7755bad2011-01-18 17:30:00 -08001281 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1282 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1283 list_del(&ac->list);
1284 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301285
Ben Greear7755bad2011-01-18 17:30:00 -08001286 while (!list_empty(&ac->tid_q)) {
1287 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1288 list);
1289 list_del(&tid->list);
1290 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301291
Ben Greear7755bad2011-01-18 17:30:00 -08001292 if (tid->paused)
1293 continue;
Sujithe8324352009-01-16 21:38:42 +05301294
Ben Greear7755bad2011-01-18 17:30:00 -08001295 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301296
Ben Greear7755bad2011-01-18 17:30:00 -08001297 /*
1298 * add tid to round-robin queue if more frames
1299 * are pending for the tid
1300 */
1301 if (!list_empty(&tid->buf_q))
1302 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301303
Ben Greear7755bad2011-01-18 17:30:00 -08001304 if (tid == last_tid ||
1305 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1306 break;
Sujithe8324352009-01-16 21:38:42 +05301307 }
Ben Greear7755bad2011-01-18 17:30:00 -08001308
1309 if (!list_empty(&ac->tid_q)) {
1310 if (!ac->sched) {
1311 ac->sched = true;
1312 list_add_tail(&ac->list, &txq->axq_acq);
1313 }
1314 }
1315
1316 if (ac == last_ac ||
1317 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1318 return;
Sujithe8324352009-01-16 21:38:42 +05301319 }
1320}
1321
Sujithe8324352009-01-16 21:38:42 +05301322/***********/
1323/* TX, DMA */
1324/***********/
1325
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001326/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001327 * Insert a chain of ath_buf (descriptors) on a txq and
1328 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001329 */
Sujith102e0572008-10-29 10:15:16 +05301330static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001331 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001332{
Sujithcbe61d82009-02-09 13:27:12 +05301333 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001334 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001335 struct ath_buf *bf, *bf_last;
1336 bool puttxbuf = false;
1337 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301338
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001339 /*
1340 * Insert the frame on the outbound list and
1341 * pass it on to the hardware.
1342 */
1343
1344 if (list_empty(head))
1345 return;
1346
Felix Fietkaufce041b2011-05-19 12:20:25 +02001347 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001348 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001349 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001350
Joe Perches226afe62010-12-02 19:12:37 -08001351 ath_dbg(common, ATH_DBG_QUEUE,
1352 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001353
Felix Fietkaufce041b2011-05-19 12:20:25 +02001354 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1355 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001356 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001357 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001358 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001359 list_splice_tail_init(head, &txq->axq_q);
1360
Felix Fietkaufce041b2011-05-19 12:20:25 +02001361 if (txq->axq_link) {
1362 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001363 ath_dbg(common, ATH_DBG_XMIT,
1364 "link[%u] (%p)=%llx (%p)\n",
1365 txq->axq_qnum, txq->axq_link,
1366 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001367 } else if (!edma)
1368 puttxbuf = true;
1369
1370 txq->axq_link = bf_last->bf_desc;
1371 }
1372
1373 if (puttxbuf) {
1374 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1375 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1376 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1377 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1378 }
1379
1380 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001381 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001382 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001383 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001384
1385 if (!internal) {
1386 txq->axq_depth++;
1387 if (bf_is_ampdu_not_probing(bf))
1388 txq->axq_ampdu_depth++;
1389 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001390}
1391
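/*
 * Queue an A-MPDU subframe: either defer it to the per-TID software queue
 * (when frames are already pending, the TID is paused, the seqno falls
 * outside the BAW, or the hardware queue is deep enough), or add it to the
 * BAW and hand it to the hardware immediately.
 */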
Sujithe8324352009-01-16 21:38:42 +05301392static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001393 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301394{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001395 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001396 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301397
Sujithe8324352009-01-16 21:38:42 +05301398 bf->bf_state.bf_type |= BUF_AMPDU;
1399
1400 /*
1401 * Do not queue to h/w when any of the following conditions is true:
1402 * - there are pending frames in software queue
1403 * - the TID is currently paused for ADDBA/BAR request
1404 * - seqno is not within block-ack window
1405 * - h/w queue depth exceeds low water mark
1406 */
1407 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001408 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001409 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001410 /*
Sujithe8324352009-01-16 21:38:42 +05301411	 * Add this frame to the software queue so it can be
1412	 * scheduled for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001413 */
Ben Greearbda8add2011-01-09 23:11:48 -08001414 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001415 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301416 ath_tx_queue_tid(txctl->txq, tid);
1417 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001418 }
1419
Felix Fietkau04caf862010-11-14 15:20:12 +01001420 INIT_LIST_HEAD(&bf_head);
1421 list_add(&bf->list, &bf_head);
1422
Sujithe8324352009-01-16 21:38:42 +05301423 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001424 if (!fi->retries)
1425 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301426
1427 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001428 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301429 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001430 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301432}
1433
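/*
 * Send a frame outside of aggregation: advance the TID's starting sequence
 * number (when a TID is given) and queue the buffer directly to the hardware.
 */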
Felix Fietkau82b873a2010-11-11 03:18:37 +01001434static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1435 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001436 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001437{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301439 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001440
Sujithe8324352009-01-16 21:38:42 +05301441 bf = list_first_entry(bf_head, struct ath_buf, list);
1442 bf->bf_state.bf_type &= ~BUF_AMPDU;
1443
1444 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001445 if (tid)
1446 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301447
Sujithd43f30152009-01-16 21:38:53 +05301448 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001449 fi = get_frame_info(bf->bf_mpdu);
1450 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001451 ath_tx_txqaddbuf(sc, txq, bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301452 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001453}
1454
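/* Map the 802.11 frame type to the hardware packet type used in the tx descriptor. */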
Sujith528f0c62008-10-29 10:14:26 +05301455static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001456{
Sujith528f0c62008-10-29 10:14:26 +05301457 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001458 enum ath9k_pkt_type htype;
1459 __le16 fc;
1460
Sujith528f0c62008-10-29 10:14:26 +05301461 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001462 fc = hdr->frame_control;
1463
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464 if (ieee80211_is_beacon(fc))
1465 htype = ATH9K_PKT_TYPE_BEACON;
1466 else if (ieee80211_is_probe_resp(fc))
1467 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1468 else if (ieee80211_is_atim(fc))
1469 htype = ATH9K_PKT_TYPE_ATIM;
1470 else if (ieee80211_is_pspoll(fc))
1471 htype = ATH9K_PKT_TYPE_PSPOLL;
1472 else
1473 htype = ATH9K_PKT_TYPE_NORMAL;
1474
1475 return htype;
1476}
1477
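/*
 * Fill the per-frame ath_frame_info: hardware key index and type, frame
 * length and, for QoS data frames when HT aggregation is enabled, a sequence
 * number taken from the TID's tx aggregation state.
 */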
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001478static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1479 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301480{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001481 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301482 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001483 struct ieee80211_sta *sta = tx_info->control.sta;
1484 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301485 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001486 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001487 struct ath_node *an = NULL;
Sujith528f0c62008-10-29 10:14:26 +05301488 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001489 enum ath9k_key_type keytype;
1490 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001491 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301492
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001493 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301494
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001495 if (sta)
1496 an = (struct ath_node *) sta->drv_priv;
1497
Sujith528f0c62008-10-29 10:14:26 +05301498 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001499 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001500 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001502 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1503
1504 /*
1505 * Override seqno set by upper layer with the one
1506 * in tx aggregation state.
1507 */
1508 tid = ATH_AN_2_TID(an, tidno);
1509 seqno = tid->seq_next;
1510 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1511 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1512 }
1513
1514 memset(fi, 0, sizeof(*fi));
1515 if (hw_key)
1516 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001517 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1518 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001519 else
1520 fi->keyix = ATH9K_TXKEYIX_INVALID;
1521 fi->keytype = keytype;
1522 fi->framelen = framelen;
1523 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301524}
1525
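/* Derive the tx descriptor flags (INTREQ, NOACK, LDPC) from the mac80211 tx info. */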
Felix Fietkau82b873a2010-11-11 03:18:37 +01001526static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301527{
1528 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1529 int flags = 0;
1530
Sujith528f0c62008-10-29 10:14:26 +05301531 flags |= ATH9K_TXDESC_INTREQ;
1532
1533 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1534 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301535
Felix Fietkau82b873a2010-11-11 03:18:37 +01001536 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001537 flags |= ATH9K_TXDESC_LDPC;
1538
Sujith528f0c62008-10-29 10:14:26 +05301539 return flags;
1540}
1541
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001542/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001543 * rix - rate index
1544 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1545 * width - 0 for 20 MHz, 1 for 40 MHz
1546 * half_gi - use 3.6 us symbol time (short GI) instead of 4 us
1547 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001548static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301549 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001551 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001552 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301553
1554 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001555 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001557 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001558 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1559
1560 if (!half_gi)
1561 duration = SYMBOL_TIME(nsymbols);
1562 else
1563 duration = SYMBOL_TIME_HALFGI(nsymbols);
1564
Sujithe63835b2008-11-18 09:07:53 +05301565	 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001566 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301567
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568 return duration;
1569}
1570
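/*
 * With APM enabled on a 5 GHz channel, drop from three tx chains to two
 * for lower rates; otherwise keep the configured chainmask.
 */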
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301571u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1572{
1573 struct ath_hw *ah = sc->sc_ah;
1574 struct ath9k_channel *curchan = ah->curchan;
1575 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1576 (curchan->channelFlags & CHANNEL_5GHZ) &&
1577 (chainmask == 0x7) && (rate < 0x90))
1578 return 0x3;
1579 else
1580 return chainmask;
1581}
1582
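/*
 * Build the four-entry rate series from the mac80211 rate control result:
 * RTS/CTS protection flags, HT or legacy rate codes, chain selection and
 * per-series packet durations, then program the scenario into the descriptor.
 */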
Felix Fietkau269c44b2010-11-14 15:20:06 +01001583static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001585 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301587 struct sk_buff *skb;
1588 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301589 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001590 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301591 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301592 int i, flags = 0;
1593 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301594 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301595
1596 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301597
Sujitha22be222009-03-30 15:28:36 +05301598 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301599 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301600 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301601 hdr = (struct ieee80211_hdr *)skb->data;
1602 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301603
Sujithc89424d2009-01-30 14:29:28 +05301604 /*
1605 * We check if Short Preamble is needed for the CTS rate by
1606 * checking the BSS's global flag.
1607 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1608 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001609 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1610 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301611 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001612 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001613
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001614 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001615 bool is_40, is_sgi, is_sp;
1616 int phy;
1617
Sujithe63835b2008-11-18 09:07:53 +05301618 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001619 continue;
1620
Sujitha8efee42008-11-18 09:07:30 +05301621 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301622 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001623
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301624 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301625 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001626 flags |= ATH9K_TXDESC_RTSENA;
1627 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1628 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1629 flags |= ATH9K_TXDESC_CTSENA;
1630 }
1631
Sujithc89424d2009-01-30 14:29:28 +05301632 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1633 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1634 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1635 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636
Felix Fietkau545750d2009-11-23 22:21:01 +01001637 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1638 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1639 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1640
1641 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1642 /* MCS rates */
1643 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301644 series[i].ChSel = ath_txchainmask_reduction(sc,
1645 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001646 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001647 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001648 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1649 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001650 continue;
1651 }
1652
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301653 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001654 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1655 !(rate->flags & IEEE80211_RATE_ERP_G))
1656 phy = WLAN_RC_PHY_CCK;
1657 else
1658 phy = WLAN_RC_PHY_OFDM;
1659
1660 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1661 series[i].Rate = rate->hw_value;
1662 if (rate->hw_value_short) {
1663 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1664 series[i].Rate |= rate->hw_value_short;
1665 } else {
1666 is_sp = false;
1667 }
1668
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301669 if (bf->bf_state.bfs_paprd)
1670 series[i].ChSel = common->tx_chainmask;
1671 else
1672 series[i].ChSel = ath_txchainmask_reduction(sc,
1673 common->tx_chainmask, series[i].Rate);
1674
Felix Fietkau545750d2009-11-23 22:21:01 +01001675 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001676 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001677 }
1678
Felix Fietkau27032052010-01-17 21:08:50 +01001679 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001680 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001681 flags &= ~ATH9K_TXDESC_RTSENA;
1682
1683 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1684 if (flags & ATH9K_TXDESC_RTSENA)
1685 flags &= ~ATH9K_TXDESC_CTSENA;
1686
Sujithe63835b2008-11-18 09:07:53 +05301687 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301688 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1689 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301690 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301691 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301692
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693}
1694
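/*
 * Allocate an ath_buf for the skb, DMA-map the frame and set up the first
 * tx descriptor. Returns NULL if no buffer is available or the mapping fails.
 */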
Felix Fietkau82b873a2010-11-11 03:18:37 +01001695static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001696 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001697 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301698{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001699 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001700 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001701 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001702 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001703 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001704 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001705 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001706
1707 bf = ath_tx_get_buffer(sc);
1708 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001709 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001710 return NULL;
1711 }
Sujithe8324352009-01-16 21:38:42 +05301712
Sujithe8324352009-01-16 21:38:42 +05301713 ATH_TXBUF_RESET(bf);
1714
Felix Fietkau82b873a2010-11-11 03:18:37 +01001715 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301716 bf->bf_mpdu = skb;
1717
Ben Greearc1739eb32010-10-14 12:45:29 -07001718 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1719 skb->len, DMA_TO_DEVICE);
1720 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301721 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001722 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001723 ath_err(ath9k_hw_common(sc->sc_ah),
1724 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001725 ath_tx_return_buffer(sc, bf);
1726 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301727 }
1728
Sujithe8324352009-01-16 21:38:42 +05301729 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301730
1731 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001732 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301733
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001734 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1735 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301736
1737 ath9k_hw_filltxdesc(ah, ds,
1738 skb->len, /* segment length */
1739 true, /* first segment */
1740 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001741 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001742 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001743 txq->axq_qnum);
1744
1745
1746 return bf;
1747}
1748
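/*
 * Hand a prepared buffer to the tx path under the txq lock: frames marked
 * for A-MPDU go through ath_tx_send_ampdu(), everything else is sent
 * directly with ath_tx_send_normal().
 */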
1749/* FIXME: tx power */
1750static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1751 struct ath_tx_control *txctl)
1752{
1753 struct sk_buff *skb = bf->bf_mpdu;
1754 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001756 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001757 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001758 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301759
Sujithe8324352009-01-16 21:38:42 +05301760 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301761 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1762 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001763 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1764 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001765 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001766
Felix Fietkau066dae92010-11-07 14:59:39 +01001767 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001768 }
1769
1770 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001771 /*
1772 * Try aggregation if it's a unicast data frame
1773 * and the destination is HT capable.
1774 */
1775 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301776 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001777 INIT_LIST_HEAD(&bf_head);
1778 list_add_tail(&bf->list, &bf_head);
1779
Felix Fietkau61117f02010-11-11 03:18:36 +01001780 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001781 bf->bf_state.bfs_paprd = txctl->paprd;
1782
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001783 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001784 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1785 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001786
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301787 if (txctl->paprd)
1788 bf->bf_state.bfs_paprd_timestamp = jiffies;
1789
Felix Fietkau55195412011-04-17 23:28:09 +02001790 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1791 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1792
Felix Fietkau248a38d2010-12-10 21:16:46 +01001793 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301794 }
1795
1796 spin_unlock_bh(&txctl->txq->axq_lock);
1797}
1798
1799/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001800int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301801 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001802{
Felix Fietkau28d16702010-11-14 15:20:10 +01001803 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1804 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001805 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001806 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001807 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001808 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001809 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001810 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001811 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001812 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001813
Ben Greeara9927ba2010-12-06 21:13:49 -08001814 /* NOTE: sta can be NULL according to net/mac80211.h */
1815 if (sta)
1816 txctl->an = (struct ath_node *)sta->drv_priv;
1817
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 if (info->control.hw_key)
1819 frmlen += info->control.hw_key->icv_len;
1820
Felix Fietkau28d16702010-11-14 15:20:10 +01001821 /*
1822 * As a temporary workaround, assign seq# here; this will likely need
1823 * to be cleaned up to work better with Beacon transmission and virtual
1824 * BSSes.
1825 */
1826 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1827 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1828 sc->tx.seq_no += 0x10;
1829 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1830 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1831 }
1832
1833 /* Add the padding after the header if this is not already done */
1834 padpos = ath9k_cmn_padpos(hdr->frame_control);
1835 padsize = padpos & 3;
1836 if (padsize && skb->len > padpos) {
1837 if (skb_headroom(skb) < padsize)
1838 return -ENOMEM;
1839
1840 skb_push(skb, padsize);
1841 memmove(skb->data, skb->data + padsize, padpos);
1842 }
1843
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001844 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1845 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1846 !ieee80211_is_data(hdr->frame_control))
1847 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1848
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001849 setup_frame_info(hw, skb, frmlen);
1850
1851 /*
1852 * At this point, the vif, hw_key and sta pointers in the tx control
1853	 * info are no longer valid (overwritten by the ath_frame_info data).
1854 */
1855
1856 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001857 if (unlikely(!bf))
1858 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001859
Felix Fietkau066dae92010-11-07 14:59:39 +01001860 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001861 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001862 if (txq == sc->tx.txq_map[q] &&
1863 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001864 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001865 txq->stopped = 1;
1866 }
1867 spin_unlock_bh(&txq->axq_lock);
1868
Sujithe8324352009-01-16 21:38:42 +05301869 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001870
1871 return 0;
1872}
1873
Sujithe8324352009-01-16 21:38:42 +05301874/*****************/
1875/* TX Completion */
1876/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001877
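/*
 * Final per-skb completion: set the mac80211 status flags, strip the header
 * padding added on transmit, wake the queue if it was stopped and hand the
 * skb back through ieee80211_tx_status().
 */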
Sujithe8324352009-01-16 21:38:42 +05301878static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001879 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001880{
Sujithe8324352009-01-16 21:38:42 +05301881 struct ieee80211_hw *hw = sc->hw;
1882 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001883 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001884 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001885 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301886
Joe Perches226afe62010-12-02 19:12:37 -08001887 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301888
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301889 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301890 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301891
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301892 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301893 /* Frame was ACKed */
1894 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1895 }
1896
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001897 padpos = ath9k_cmn_padpos(hdr->frame_control);
1898 padsize = padpos & 3;
1899 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301900 /*
1901 * Remove MAC header padding before giving the frame back to
1902 * mac80211.
1903 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001904 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301905 skb_pull(skb, padsize);
1906 }
1907
Sujith1b04b932010-01-08 10:36:05 +05301908 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1909 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001910 ath_dbg(common, ATH_DBG_PS,
1911 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301912 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1913 PS_WAIT_FOR_CAB |
1914 PS_WAIT_FOR_PSPOLL_DATA |
1915 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001916 }
1917
Felix Fietkau7545daf2011-01-24 19:23:16 +01001918 q = skb_get_queue_mapping(skb);
1919 if (txq == sc->tx.txq_map[q]) {
1920 spin_lock_bh(&txq->axq_lock);
1921 if (WARN_ON(--txq->pending_frames < 0))
1922 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001923
Felix Fietkau7545daf2011-01-24 19:23:16 +01001924 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1925 ieee80211_wake_queue(sc->hw, q);
1926 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001927 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001928 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001929 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001930
1931 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301932}
1933
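/*
 * Unmap the frame, complete the skb (or the PAPRD calibration frame) and
 * return the ath_buf list to the free pool.
 */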
1934static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001935 struct ath_txq *txq, struct list_head *bf_q,
1936 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301937{
1938 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301939 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301940 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301941
Sujithe8324352009-01-16 21:38:42 +05301942 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301943 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301944
1945 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301946 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301947
1948 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301949 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301950 }
1951
Ben Greearc1739eb32010-10-14 12:45:29 -07001952 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001953 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001954
1955 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301956 if (time_after(jiffies,
1957 bf->bf_state.bfs_paprd_timestamp +
1958 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001959 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001960 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001961 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001962 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001963 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001964 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001965 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001966 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001967	 /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
1968 * accidentally reference it later.
1969 */
1970 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301971
1972 /*
1973	 * Return the list of ath_buf for this mpdu to the free queue
1974 */
1975 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1976 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1977 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1978}
1979
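/*
 * Translate the hardware tx status into mac80211 rate control feedback:
 * ACK RSSI, A-MPDU counts, filtered-frame and underrun handling, and the
 * per-rate try counts.
 */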
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001980static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1981 struct ath_tx_status *ts, int nframes, int nbad,
1982 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301983{
Sujitha22be222009-03-30 15:28:36 +05301984 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301985 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301986 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001987 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001988 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301989 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301990
Sujith95e4acb2009-03-13 08:56:09 +05301991 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001992 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301993
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301995 WARN_ON(tx_rateindex >= hw->max_rates);
1996
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001997 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301998 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001999 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002000 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302001
Felix Fietkaub572d032010-11-14 15:20:07 +01002002 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002003
Felix Fietkaub572d032010-11-14 15:20:07 +01002004 tx_info->status.ampdu_len = nframes;
2005 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002006 }
2007
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002008 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302009 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002010 /*
2011	 * If an underrun error is seen, treat it as an excessive
2012	 * retry only if the max frame trigger level has been reached
2013 * (2 KB for single stream, and 4 KB for dual stream).
2014 * Adjust the long retry as if the frame was tried
2015 * hw->max_rate_tries times to affect how rate control updates
2016 * PER for the failed rate.
2017 * In case of congestion on the bus penalizing this type of
2018 * underruns should help hardware actually transmit new frames
2019 * successfully by eventually preferring slower rates.
2020 * This itself should also alleviate congestion on the bus.
2021 */
2022 if (ieee80211_is_data(hdr->frame_control) &&
2023 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2024 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002025 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002026 tx_info->status.rates[tx_rateindex].count =
2027 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302028 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302029
Felix Fietkau545750d2009-11-23 22:21:01 +01002030 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302031 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002032 tx_info->status.rates[i].idx = -1;
2033 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302034
Felix Fietkau78c46532010-06-25 01:26:16 +02002035 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302036}
2037
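/*
 * Completion path shared by the legacy and EDMA handlers: update the queue
 * depth counters, complete the buffer (single frame or aggregate) and let
 * the scheduler refill the hardware queue.
 */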
Felix Fietkaufce041b2011-05-19 12:20:25 +02002038static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2039 struct ath_tx_status *ts, struct ath_buf *bf,
2040 struct list_head *bf_head)
2041{
2042 int txok;
2043
2044 txq->axq_depth--;
2045 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2046 txq->axq_tx_inprogress = false;
2047 if (bf_is_ampdu_not_probing(bf))
2048 txq->axq_ampdu_depth--;
2049
2050 spin_unlock_bh(&txq->axq_lock);
2051
2052 if (!bf_isampdu(bf)) {
2053 /*
2054 * This frame is sent out as a single frame.
2055 * Use hardware retry status for this frame.
2056 */
2057 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2058 bf->bf_state.bf_type |= BUF_XRETRY;
2059 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2060 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2061 } else
2062 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2063
2064 spin_lock_bh(&txq->axq_lock);
2065
2066 if (sc->sc_flags & SC_OP_TXAGGR)
2067 ath_txq_schedule(sc, txq);
2068}
2069
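/*
 * Reap completed frames from a legacy (non-EDMA) tx queue: walk axq_q,
 * keep the stale holding descriptor for the hardware and complete each
 * finished buffer chain.
 */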
Sujithc4288392008-11-18 09:09:30 +05302070static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071{
Sujithcbe61d82009-02-09 13:27:12 +05302072 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002073 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2075 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302076 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002077 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078 int status;
2079
Joe Perches226afe62010-12-02 19:12:37 -08002080 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2081 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2082 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083
Felix Fietkaufce041b2011-05-19 12:20:25 +02002084 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 if (list_empty(&txq->axq_q)) {
2087 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002088 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002089 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 break;
2091 }
2092 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2093
2094 /*
2095	 * There is a race condition where a BH gets scheduled
2096	 * after sw writes TxE and before hw re-loads the last
2097 * descriptor to get the newly chained one.
2098 * Software must keep the last DONE descriptor as a
2099 * holding descriptor - software does so by marking
2100 * it with the STALE flag.
2101 */
2102 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302103 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002104 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002105 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002107
2108 bf = list_entry(bf_held->list.next, struct ath_buf,
2109 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002110 }
2111
2112 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302113 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002114
Felix Fietkau29bffa92010-03-29 20:14:23 -07002115 memset(&ts, 0, sizeof(ts));
2116 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002117 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002118 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002119
Ben Greear2dac4fb2011-01-09 23:11:45 -08002120 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121
2122 /*
2123 * Remove ath_buf's of the same transmit unit from txq,
2124	 * but leave the last descriptor in place as the holding
2125 * descriptor for hw.
2126 */
Sujitha119cc42009-03-30 15:28:38 +05302127 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002129 if (!list_is_singular(&lastbf->list))
2130 list_cut_position(&bf_head,
2131 &txq->axq_q, lastbf->list.prev);
2132
Felix Fietkaufce041b2011-05-19 12:20:25 +02002133 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002134 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002135 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136 }
Johannes Berge6a98542008-10-21 12:40:02 +02002137
Felix Fietkaufce041b2011-05-19 12:20:25 +02002138 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002140 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141}
2142
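/*
 * Tx watchdog: if a queue still has frames pending but has made no progress
 * since the previous poll, assume the hardware is hung and reset the chip.
 */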
Sujith305fe472009-07-23 15:32:29 +05302143static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002144{
2145 struct ath_softc *sc = container_of(work, struct ath_softc,
2146 tx_complete_work.work);
2147 struct ath_txq *txq;
2148 int i;
2149 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002150#ifdef CONFIG_ATH9K_DEBUGFS
2151 sc->tx_complete_poll_work_seen++;
2152#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002153
2154 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2155 if (ATH_TXQ_SETUP(sc, i)) {
2156 txq = &sc->tx.txq[i];
2157 spin_lock_bh(&txq->axq_lock);
2158 if (txq->axq_depth) {
2159 if (txq->axq_tx_inprogress) {
2160 needreset = true;
2161 spin_unlock_bh(&txq->axq_lock);
2162 break;
2163 } else {
2164 txq->axq_tx_inprogress = true;
2165 }
2166 }
2167 spin_unlock_bh(&txq->axq_lock);
2168 }
2169
2170 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002171 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2172 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002173 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002174 }
2175
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002176 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002177 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2178}
2179
2180
Sujithe8324352009-01-16 21:38:42 +05302181
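/* Process completions for every tx queue flagged in the tx interrupt mask. */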
2182void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183{
Sujithe8324352009-01-16 21:38:42 +05302184 int i;
2185 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186
Sujithe8324352009-01-16 21:38:42 +05302187 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002188
2189 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302190 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2191 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 }
2193}
2194
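/*
 * Completion handler for EDMA (tx status ring) hardware: the completed queue
 * is identified from the status entry instead of per-queue interrupt bits.
 */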
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002195void ath_tx_edma_tasklet(struct ath_softc *sc)
2196{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002197 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002198 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2199 struct ath_hw *ah = sc->sc_ah;
2200 struct ath_txq *txq;
2201 struct ath_buf *bf, *lastbf;
2202 struct list_head bf_head;
2203 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002204
2205 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002206 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002207 if (status == -EINPROGRESS)
2208 break;
2209 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002210 ath_dbg(common, ATH_DBG_XMIT,
2211 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002212 break;
2213 }
2214
2215 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002216 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002217 continue;
2218
Felix Fietkaufce041b2011-05-19 12:20:25 +02002219 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002220
2221 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002222
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002223 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2224 spin_unlock_bh(&txq->axq_lock);
2225 return;
2226 }
2227
2228 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2229 struct ath_buf, list);
2230 lastbf = bf->bf_lastbf;
2231
2232 INIT_LIST_HEAD(&bf_head);
2233 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2234 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002235
Felix Fietkaufce041b2011-05-19 12:20:25 +02002236 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2237 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002238
Felix Fietkaufce041b2011-05-19 12:20:25 +02002239 if (!list_empty(&txq->axq_q)) {
2240 struct list_head bf_q;
2241
2242 INIT_LIST_HEAD(&bf_q);
2243 txq->axq_link = NULL;
2244 list_splice_tail_init(&txq->axq_q, &bf_q);
2245 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2246 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002247 }
2248
Felix Fietkaufce041b2011-05-19 12:20:25 +02002249 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002250 spin_unlock_bh(&txq->axq_lock);
2251 }
2252}
2253
Sujithe8324352009-01-16 21:38:42 +05302254/*****************/
2255/* Init, Cleanup */
2256/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002257
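/* Allocate the DMA-coherent ring used by EDMA hardware to report tx status. */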
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002258static int ath_txstatus_setup(struct ath_softc *sc, int size)
2259{
2260 struct ath_descdma *dd = &sc->txsdma;
2261 u8 txs_len = sc->sc_ah->caps.txs_len;
2262
2263 dd->dd_desc_len = size * txs_len;
2264 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2265 &dd->dd_desc_paddr, GFP_KERNEL);
2266 if (!dd->dd_desc)
2267 return -ENOMEM;
2268
2269 return 0;
2270}
2271
2272static int ath_tx_edma_init(struct ath_softc *sc)
2273{
2274 int err;
2275
2276 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2277 if (!err)
2278 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2279 sc->txsdma.dd_desc_paddr,
2280 ATH_TXSTATUS_RING_SIZE);
2281
2282 return err;
2283}
2284
2285static void ath_tx_edma_cleanup(struct ath_softc *sc)
2286{
2287 struct ath_descdma *dd = &sc->txsdma;
2288
2289 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2290 dd->dd_desc_paddr);
2291}
2292
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002293int ath_tx_init(struct ath_softc *sc, int nbufs)
2294{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002295 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002296 int error = 0;
2297
Sujith797fe5cb2009-03-30 15:28:45 +05302298 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002299
Sujith797fe5cb2009-03-30 15:28:45 +05302300 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002301 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302302 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002303 ath_err(common,
2304 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302305 goto err;
2306 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002307
Sujith797fe5cb2009-03-30 15:28:45 +05302308 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002309 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302310 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002311 ath_err(common,
2312 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302313 goto err;
2314 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002315
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002316 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2317
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002318 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2319 error = ath_tx_edma_init(sc);
2320 if (error)
2321 goto err;
2322 }
2323
Sujith797fe5cb2009-03-30 15:28:45 +05302324err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002325 if (error != 0)
2326 ath_tx_cleanup(sc);
2327
2328 return error;
2329}
2330
Sujith797fe5cb2009-03-30 15:28:45 +05302331void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002332{
Sujithb77f4832008-12-07 21:44:03 +05302333 if (sc->beacon.bdma.dd_desc_len != 0)
2334 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335
Sujithb77f4832008-12-07 21:44:03 +05302336 if (sc->tx.txdma.dd_desc_len != 0)
2337 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002338
2339 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2340 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341}
2342
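/* Initialize per-station TID and access-category state used for tx aggregation. */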
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2344{
Sujithc5170162008-10-29 10:13:59 +05302345 struct ath_atx_tid *tid;
2346 struct ath_atx_ac *ac;
2347 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348
Sujith8ee5afb2008-12-07 21:43:36 +05302349 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302350 tidno < WME_NUM_TID;
2351 tidno++, tid++) {
2352 tid->an = an;
2353 tid->tidno = tidno;
2354 tid->seq_start = tid->seq_next = 0;
2355 tid->baw_size = WME_MAX_BA;
2356 tid->baw_head = tid->baw_tail = 0;
2357 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302358 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302359 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302360 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302361 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302362 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302363 tid->state &= ~AGGR_ADDBA_COMPLETE;
2364 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302365 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujith8ee5afb2008-12-07 21:43:36 +05302367 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302368 acno < WME_NUM_AC; acno++, ac++) {
2369 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002370 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302371 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372 }
2373}
2374
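/*
 * Unschedule a departing station's TIDs and ACs and drain any frames still
 * sitting in their software queues.
 */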
Sujithb5aa9bf2008-10-29 10:13:31 +05302375void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376{
Felix Fietkau2b409942010-07-07 19:42:08 +02002377 struct ath_atx_ac *ac;
2378 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002380 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302381
Felix Fietkau2b409942010-07-07 19:42:08 +02002382 for (tidno = 0, tid = &an->tid[tidno];
2383 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Felix Fietkau2b409942010-07-07 19:42:08 +02002385 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002386 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387
Felix Fietkau2b409942010-07-07 19:42:08 +02002388 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Felix Fietkau2b409942010-07-07 19:42:08 +02002390 if (tid->sched) {
2391 list_del(&tid->list);
2392 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002394
2395 if (ac->sched) {
2396 list_del(&ac->list);
2397 tid->ac->sched = false;
2398 }
2399
2400 ath_tid_drain(sc, txq, tid);
2401 tid->state &= ~AGGR_ADDBA_COMPLETE;
2402 tid->state &= ~AGGR_CLEANUP;
2403
2404 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405 }
2406}