/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME 16

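/*
 * bits_per_symbol[] lists the data bits carried by one OFDM symbol for
 * MCS 0-7 in a single spatial stream, for 20 MHz and 40 MHz channels.
 * Multi-stream rates scale this by HT_RC_2_STREAMS(). As a worked
 * example (illustrative, not from the original source): MCS 7 at 40 MHz
 * carries 540 bits per 4 us symbol, i.e. 135 Mbit/s with the long
 * guard interval.
 */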
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 }, /* 0: BPSK */
	{  52, 108 }, /* 1: QPSK 1/2 */
	{  78, 162 }, /* 2: QPSK 3/4 */
	{ 104, 216 }, /* 3: 16-QAM 1/2 */
	{ 156, 324 }, /* 4: 16-QAM 3/4 */
	{ 208, 432 }, /* 5: 64-QAM 2/3 */
	{ 234, 486 }, /* 6: 64-QAM 3/4 */
	{ 260, 540 }, /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate) ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

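/*
 * ath_max_4ms_framelen[][] gives, per HT20/HT40 and long/short GI mode
 * and per MCS index, the largest frame length (in bytes) that fits in
 * roughly 4 ms of airtime. ath_lookup_rate() below uses it to cap the
 * size of an A-MPDU; entries are clamped to 65532 because the hardware
 * length field is 16 bits wide.
 */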
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

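/*
 * Per-frame driver state (struct ath_frame_info) is stashed in the
 * rate_driver_data scratch area of the mac80211 tx info; the
 * BUILD_BUG_ON below guarantees it fits.
 */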
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

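/*
 * Block-ack window tracking: tid->tx_buf is a bitmap of outstanding
 * subframes, indexed relative to tid->seq_start. Completing a frame
 * clears its bit and slides seq_start/baw_head forward over any
 * already-completed leading entries; adding a frame sets its bit and
 * may advance baw_tail.
 */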
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

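/*
 * Clone an ath_buf (descriptor plus state) so a software retry can be
 * queued while the original descriptor is still held by the hardware
 * as a stale/holding descriptor. Returns NULL if the tx buffer pool
 * is exhausted.
 */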
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


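/*
 * Completion handler for an A-MPDU: walk the subframe chain, use the
 * block-ack bitmap from the tx status to decide which subframes were
 * acked, software-retry the rest (up to ATH_MAX_SW_RETRIES), update
 * the block-ack window and feed the result to ath_tx_rc_status().
 */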
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

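/*
 * Pick the aggregate size limit for this frame: the smallest 4 ms
 * frame length across the frame's rate series (see
 * ath_max_4ms_framelen above), further capped by the peer's declared
 * maximum A-MPDU size.
 */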
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
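/*
 * Worked example (illustrative only): with a 4 us MPDU density at
 * MCS 7, 40 MHz, long GI, NUM_SYMBOLS_PER_USEC(4) = 1 symbol and
 * bits_per_symbol[7][1] = 540 bits, so minlen = 540 / 8 = 67 bytes;
 * a shorter subframe gets (minlen - frmlen) / ATH_AGGR_DELIM_SZ extra
 * delimiters on top of the standard and encryption padding.
 */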
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates.
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

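/*
 * Build one aggregate from the tid's software queue. The loop stops
 * when the block-ack window would be overstepped, when the byte limit
 * from ath_lookup_rate() or the subframe limit (half the BAW, at most
 * ATH_AMPDU_SUBFRAME_DEFAULT) is reached, or when a rate-control
 * probe or non-MCS frame is encountered.
 */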
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

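/*
 * ADDBA request from mac80211: reset the block-ack window state and
 * keep the tid paused until ath_tx_aggr_resume() is called once the
 * ADDBA exchange has completed.
 */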
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

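/*
 * ADDBA teardown: pause the tid and either drop back to the
 * non-aggregation state immediately, or mark AGGR_CLEANUP so that the
 * remaining in-flight subframes are reaped in the completion path
 * before the tid can be reused.
 */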
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

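/*
 * Aggregate frames that are not rate-control probes count toward
 * axq_ampdu_depth, which throttles how much A-MPDU traffic is queued
 * to the hardware at once.
 */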
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001076static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1077{
1078 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1079 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1080}
1081
Sujith043a0402009-01-16 21:38:47 +05301082/*
1083 * Drain a given TX queue (could be Beacon or Data)
1084 *
1085 * This assumes output has been stopped and
1086 * we do not need to block ath_tx_tasklet.
1087 */
1088void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301089{
1090 struct ath_buf *bf, *lastbf;
1091 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001092 struct ath_tx_status ts;
1093
1094 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301095 INIT_LIST_HEAD(&bf_head);
1096
Sujithe8324352009-01-16 21:38:42 +05301097 for (;;) {
1098 spin_lock_bh(&txq->axq_lock);
1099
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001100 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1101 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1102 txq->txq_headidx = txq->txq_tailidx = 0;
1103 spin_unlock_bh(&txq->axq_lock);
1104 break;
1105 } else {
1106 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1107 struct ath_buf, list);
1108 }
1109 } else {
1110 if (list_empty(&txq->axq_q)) {
1111 txq->axq_link = NULL;
1112 spin_unlock_bh(&txq->axq_lock);
1113 break;
1114 }
1115 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1116 list);
Sujithe8324352009-01-16 21:38:42 +05301117
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001118 if (bf->bf_stale) {
1119 list_del(&bf->list);
1120 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301121
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001122 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001123 continue;
1124 }
Sujithe8324352009-01-16 21:38:42 +05301125 }
1126
1127 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301128
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001129 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1130 list_cut_position(&bf_head,
1131 &txq->txq_fifo[txq->txq_tailidx],
1132 &lastbf->list);
1133 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1134 } else {
1135 /* remove ath_buf's of the same mpdu from txq */
1136 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1137 }
1138
Sujithe8324352009-01-16 21:38:42 +05301139 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001140 if (bf_is_ampdu_not_probing(bf))
1141 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301142 spin_unlock_bh(&txq->axq_lock);
1143
1144 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001145 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1146 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301147 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001148 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301149 }
1150
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001151 spin_lock_bh(&txq->axq_lock);
1152 txq->axq_tx_inprogress = false;
1153 spin_unlock_bh(&txq->axq_lock);
1154
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001155 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1156 spin_lock_bh(&txq->axq_lock);
1157 while (!list_empty(&txq->txq_fifo_pending)) {
1158 bf = list_first_entry(&txq->txq_fifo_pending,
1159 struct ath_buf, list);
1160 list_cut_position(&bf_head,
1161 &txq->txq_fifo_pending,
1162 &bf->bf_lastbf->list);
1163 spin_unlock_bh(&txq->axq_lock);
1164
1165 if (bf_isampdu(bf))
1166 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001167 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001168 else
1169 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1170 &ts, 0, 0);
1171 spin_lock_bh(&txq->axq_lock);
1172 }
1173 spin_unlock_bh(&txq->axq_lock);
1174 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001175
1176 /* flush any pending frames if aggregation is enabled */
1177 if (sc->sc_flags & SC_OP_TXAGGR) {
1178 if (!retry_tx) {
1179 spin_lock_bh(&txq->axq_lock);
1180 ath_txq_drain_pending_buffers(sc, txq);
1181 spin_unlock_bh(&txq->axq_lock);
1182 }
1183 }
Sujithe8324352009-01-16 21:38:42 +05301184}
1185
Felix Fietkau080e1a22010-12-05 20:17:53 +01001186bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301187{
Sujithcbe61d82009-02-09 13:27:12 +05301188 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001189 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301190 struct ath_txq *txq;
1191 int i, npend = 0;
1192
1193 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001194 return true;
Sujith043a0402009-01-16 21:38:47 +05301195
1196 /* Stop beacon queue */
1197 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1198
1199 /* Stop data queues */
1200 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1201 if (ATH_TXQ_SETUP(sc, i)) {
1202 txq = &sc->tx.txq[i];
1203 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1204 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1205 }
1206 }
1207
Felix Fietkau080e1a22010-12-05 20:17:53 +01001208 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001209 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301210
1211 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1212 if (ATH_TXQ_SETUP(sc, i))
1213 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1214 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001215
1216 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301217}
1218
Sujithe8324352009-01-16 21:38:42 +05301219void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1220{
1221 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1222 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1223}
1224
Ben Greear7755bad2011-01-18 17:30:00 -08001225/* For each axq_acq entry, for each tid, try to schedule packets
1226 * for transmit until ampdu_depth has reached min Q depth.
1227 */
Sujithe8324352009-01-16 21:38:42 +05301228void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1229{
Ben Greear7755bad2011-01-18 17:30:00 -08001230 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1231 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301232
Felix Fietkau21f28e62011-01-15 14:30:14 +01001233 if (list_empty(&txq->axq_acq) ||
1234 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301235 return;
1236
1237 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001238 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301239
Ben Greear7755bad2011-01-18 17:30:00 -08001240 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1241 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1242 list_del(&ac->list);
1243 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301244
Ben Greear7755bad2011-01-18 17:30:00 -08001245 while (!list_empty(&ac->tid_q)) {
1246 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1247 list);
1248 list_del(&tid->list);
1249 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301250
Ben Greear7755bad2011-01-18 17:30:00 -08001251 if (tid->paused)
1252 continue;
Sujithe8324352009-01-16 21:38:42 +05301253
Ben Greear7755bad2011-01-18 17:30:00 -08001254 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301255
Ben Greear7755bad2011-01-18 17:30:00 -08001256 /*
1257 * add tid to round-robin queue if more frames
1258 * are pending for the tid
1259 */
1260 if (!list_empty(&tid->buf_q))
1261 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301262
Ben Greear7755bad2011-01-18 17:30:00 -08001263 if (tid == last_tid ||
1264 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1265 break;
Sujithe8324352009-01-16 21:38:42 +05301266 }
Ben Greear7755bad2011-01-18 17:30:00 -08001267
1268 if (!list_empty(&ac->tid_q)) {
1269 if (!ac->sched) {
1270 ac->sched = true;
1271 list_add_tail(&ac->list, &txq->axq_acq);
1272 }
1273 }
1274
1275 if (ac == last_ac ||
1276 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1277 return;
Sujithe8324352009-01-16 21:38:42 +05301278 }
1279}
1280
Sujithe8324352009-01-16 21:38:42 +05301281/***********/
1282/* TX, DMA */
1283/***********/
1284
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001285/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001286 * Insert a chain of ath_buf (descriptors) on a txq and
1287 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001288 */
Sujith102e0572008-10-29 10:15:16 +05301289static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1290 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001291{
Sujithcbe61d82009-02-09 13:27:12 +05301292 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001293 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301295
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001296 /*
1297 * Insert the frame on the outbound list and
1298 * pass it on to the hardware.
1299 */
1300
1301 if (list_empty(head))
1302 return;
1303
1304 bf = list_first_entry(head, struct ath_buf, list);
1305
Joe Perches226afe62010-12-02 19:12:37 -08001306 ath_dbg(common, ATH_DBG_QUEUE,
1307 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001308
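	/*
	 * Two hardware flavours: EDMA-capable chips consume a per-queue FIFO
	 * of descriptor chains, while older MACs take a single linked list
	 * whose tail is patched via axq_link.
	 */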
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001309 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1310 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1311 list_splice_tail_init(head, &txq->txq_fifo_pending);
1312 return;
1313 }
1314 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001315 ath_dbg(common, ATH_DBG_XMIT,
1316 "Initializing tx fifo %d which is non-empty\n",
1317 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001318 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1319 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1320 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001321 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001322 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1323 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001324 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001325 list_splice_tail_init(head, &txq->axq_q);
1326
1327 if (txq->axq_link == NULL) {
1328 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001329 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1330 txq->axq_qnum, ito64(bf->bf_daddr),
1331 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001332 } else {
1333 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001334 ath_dbg(common, ATH_DBG_XMIT,
1335 "link[%u] (%p)=%llx (%p)\n",
1336 txq->axq_qnum, txq->axq_link,
1337 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001338 }
1339 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1340 &txq->axq_link);
1341 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001342 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001343 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001344 if (bf_is_ampdu_not_probing(bf))
1345 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001346}
1347
Sujithe8324352009-01-16 21:38:42 +05301348static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001349 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301350{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001351 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001352 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301353
Sujithe8324352009-01-16 21:38:42 +05301354 bf->bf_state.bf_type |= BUF_AMPDU;
1355
1356 /*
1357 * Do not queue to h/w when any of the following conditions is true:
1358	 * - there are pending frames in the software queue
1359	 * - the TID is currently paused for an ADDBA/BAR request
1360	 * - seqno is not within the block-ack window
1361 * - h/w queue depth exceeds low water mark
1362 */
1363 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001364 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001365 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001366 /*
Sujithe8324352009-01-16 21:38:42 +05301367 * Add this frame to software queue for scheduling later
1368 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001369 */
Ben Greearbda8add2011-01-09 23:11:48 -08001370 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001371 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301372 ath_tx_queue_tid(txctl->txq, tid);
1373 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001374 }
1375
Felix Fietkau04caf862010-11-14 15:20:12 +01001376 INIT_LIST_HEAD(&bf_head);
1377 list_add(&bf->list, &bf_head);
1378
Sujithe8324352009-01-16 21:38:42 +05301379 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001380 if (!fi->retries)
1381 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301382
1383 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001384 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301385 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001386 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001387 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301388}
1389
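/* Send a single (non-aggregated) frame: clear the AMPDU flag, advance the
 * starting sequence number used for a later ADDBA request, and queue the
 * buffer directly to the hardware.
 */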
Felix Fietkau82b873a2010-11-11 03:18:37 +01001390static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1391 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001392 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001393{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001394 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301395 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001396
Sujithe8324352009-01-16 21:38:42 +05301397 bf = list_first_entry(bf_head, struct ath_buf, list);
1398 bf->bf_state.bf_type &= ~BUF_AMPDU;
1399
1400 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001401 if (tid)
1402 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301403
Sujithd43f30152009-01-16 21:38:53 +05301404 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001405 fi = get_frame_info(bf->bf_mpdu);
1406 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301407 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301408 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001409}
1410
Sujith528f0c62008-10-29 10:14:26 +05301411static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001412{
Sujith528f0c62008-10-29 10:14:26 +05301413 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001414 enum ath9k_pkt_type htype;
1415 __le16 fc;
1416
Sujith528f0c62008-10-29 10:14:26 +05301417 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001418 fc = hdr->frame_control;
1419
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001420 if (ieee80211_is_beacon(fc))
1421 htype = ATH9K_PKT_TYPE_BEACON;
1422 else if (ieee80211_is_probe_resp(fc))
1423 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1424 else if (ieee80211_is_atim(fc))
1425 htype = ATH9K_PKT_TYPE_ATIM;
1426 else if (ieee80211_is_pspoll(fc))
1427 htype = ATH9K_PKT_TYPE_PSPOLL;
1428 else
1429 htype = ATH9K_PKT_TYPE_NORMAL;
1430
1431 return htype;
1432}
1433
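/* Cache per-frame transmit state (hw key index, key type, frame length and
 * the driver-assigned sequence number) in the skb's ath_frame_info area so
 * later stages no longer depend on the mac80211 control info.
 */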
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001434static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1435 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301436{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001437 struct ath_wiphy *aphy = hw->priv;
1438 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301439 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001440 struct ieee80211_sta *sta = tx_info->control.sta;
1441 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301442 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001443 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301444 struct ath_node *an;
1445 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001446 enum ath9k_key_type keytype;
1447 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001448 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301449
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001450 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301451
Sujith528f0c62008-10-29 10:14:26 +05301452 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001453 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1454 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001455
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001456 an = (struct ath_node *) sta->drv_priv;
1457 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1458
1459 /*
1460 * Override seqno set by upper layer with the one
1461 * in tx aggregation state.
1462 */
1463 tid = ATH_AN_2_TID(an, tidno);
1464 seqno = tid->seq_next;
1465 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1466 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1467 }
1468
1469 memset(fi, 0, sizeof(*fi));
1470 if (hw_key)
1471 fi->keyix = hw_key->hw_key_idx;
1472 else
1473 fi->keyix = ATH9K_TXKEYIX_INVALID;
1474 fi->keytype = keytype;
1475 fi->framelen = framelen;
1476 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301477}
1478
Felix Fietkau82b873a2010-11-11 03:18:37 +01001479static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301480{
1481 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1482 int flags = 0;
1483
1484 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1485 flags |= ATH9K_TXDESC_INTREQ;
1486
1487 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1488 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301489
Felix Fietkau82b873a2010-11-11 03:18:37 +01001490 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001491 flags |= ATH9K_TXDESC_LDPC;
1492
Sujith528f0c62008-10-29 10:14:26 +05301493 return flags;
1494}
1495
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001496/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001497 * rix - rate index
1498 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1499 * width - 0 for 20 MHz, 1 for 40 MHz
1500	 * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
1501 */
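/*
 * Rough worked example (illustrative numbers only): a 1500-byte MPDU at
 * MCS 0 in 20 MHz (26 data bits per symbol) needs
 * nsymbols = ceil((1500 * 8 + OFDM_PLCP_BITS) / 26) = 463 symbols, i.e.
 * SYMBOL_TIME(463) = 1852 us of payload airtime with the full GI, before
 * the training/signal field overhead is added below.
 */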
Felix Fietkau269c44b2010-11-14 15:20:06 +01001502static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301503 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001506 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301507
1508 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001509 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001510 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001511 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001512 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1513
1514 if (!half_gi)
1515 duration = SYMBOL_TIME(nsymbols);
1516 else
1517 duration = SYMBOL_TIME_HALFGI(nsymbols);
1518
Sujithe63835b2008-11-18 09:07:53 +05301519	/* add up the duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301521
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 return duration;
1523}
1524
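/* With APM enabled on a 5 GHz channel, reduce a three-chain (0x7) mask to
 * two chains (0x3) for rates below 0x90, i.e. anything short of the
 * three-stream MCS range.
 */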
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301525u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1526{
1527 struct ath_hw *ah = sc->sc_ah;
1528 struct ath9k_channel *curchan = ah->curchan;
1529 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1530 (curchan->channelFlags & CHANNEL_5GHZ) &&
1531 (chainmask == 0x7) && (rate < 0x90))
1532 return 0x3;
1533 else
1534 return chainmask;
1535}
1536
Felix Fietkau269c44b2010-11-14 15:20:06 +01001537static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001538{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001539 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001540 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301541 struct sk_buff *skb;
1542 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301543 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001544 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301545 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301546 int i, flags = 0;
1547 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301548 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301549
1550 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301551
Sujitha22be222009-03-30 15:28:36 +05301552 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301553 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301554 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301555 hdr = (struct ieee80211_hdr *)skb->data;
1556 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301557
Sujithc89424d2009-01-30 14:29:28 +05301558 /*
1559	 * Whether Short Preamble is needed for the CTS rate is determined
1560	 * from the BSS's global flag.
1561 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1562 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001563 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1564 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301565 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001566 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001567
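	/* Fill the four-entry multi-rate retry series that the MAC walks
	 * through, mirroring mac80211's tx_info->control.rates[].
	 */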
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001569 bool is_40, is_sgi, is_sp;
1570 int phy;
1571
Sujithe63835b2008-11-18 09:07:53 +05301572 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001573 continue;
1574
Sujitha8efee42008-11-18 09:07:30 +05301575 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301576 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577
Felix Fietkau27032052010-01-17 21:08:50 +01001578 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1579 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301580 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001581 flags |= ATH9K_TXDESC_RTSENA;
1582 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1583 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1584 flags |= ATH9K_TXDESC_CTSENA;
1585 }
1586
Sujithc89424d2009-01-30 14:29:28 +05301587 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1588 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1589 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1590 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001591
Felix Fietkau545750d2009-11-23 22:21:01 +01001592 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1593 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1594 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1595
1596 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1597 /* MCS rates */
1598 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301599 series[i].ChSel = ath_txchainmask_reduction(sc,
1600 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001601 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001602 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001603 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1604 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001605 continue;
1606 }
1607
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301608 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001609 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1610 !(rate->flags & IEEE80211_RATE_ERP_G))
1611 phy = WLAN_RC_PHY_CCK;
1612 else
1613 phy = WLAN_RC_PHY_OFDM;
1614
1615 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1616 series[i].Rate = rate->hw_value;
1617 if (rate->hw_value_short) {
1618 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1619 series[i].Rate |= rate->hw_value_short;
1620 } else {
1621 is_sp = false;
1622 }
1623
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301624 if (bf->bf_state.bfs_paprd)
1625 series[i].ChSel = common->tx_chainmask;
1626 else
1627 series[i].ChSel = ath_txchainmask_reduction(sc,
1628 common->tx_chainmask, series[i].Rate);
1629
Felix Fietkau545750d2009-11-23 22:21:01 +01001630 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001631 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001632 }
1633
Felix Fietkau27032052010-01-17 21:08:50 +01001634 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001635 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001636 flags &= ~ATH9K_TXDESC_RTSENA;
1637
1638 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1639 if (flags & ATH9K_TXDESC_RTSENA)
1640 flags &= ~ATH9K_TXDESC_CTSENA;
1641
Sujithe63835b2008-11-18 09:07:53 +05301642 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301643 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1644 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301645 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301646 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301647
Sujith17d79042009-02-09 13:27:03 +05301648 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301649 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001650}
1651
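/* Grab a free ath_buf, DMA-map the skb and fill in the first tx descriptor.
 * Returns NULL if the buffer pool is exhausted or the DMA mapping fails.
 */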
Felix Fietkau82b873a2010-11-11 03:18:37 +01001652static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001653 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001654 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301655{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001656 struct ath_wiphy *aphy = hw->priv;
1657 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001658 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001659 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001660 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001661 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001662 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001663 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001664
1665 bf = ath_tx_get_buffer(sc);
1666 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001667 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001668 return NULL;
1669 }
Sujithe8324352009-01-16 21:38:42 +05301670
Sujithe8324352009-01-16 21:38:42 +05301671 ATH_TXBUF_RESET(bf);
1672
Felix Fietkau827e69b2009-11-15 23:09:25 +01001673 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001674 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301675 bf->bf_mpdu = skb;
1676
Ben Greearc1739eb32010-10-14 12:45:29 -07001677 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1678 skb->len, DMA_TO_DEVICE);
1679 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301680 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001681 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001682 ath_err(ath9k_hw_common(sc->sc_ah),
1683 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001684 ath_tx_return_buffer(sc, bf);
1685 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301686 }
1687
Sujithe8324352009-01-16 21:38:42 +05301688 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301689
1690 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001691 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301692
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001693 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1694 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301695
1696 ath9k_hw_filltxdesc(ah, ds,
1697 skb->len, /* segment length */
1698 true, /* first segment */
1699 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001700 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001701 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001702 txq->axq_qnum);
1703
1704
1705 return bf;
1706}
1707
1708/* FIXME: tx power */
1709static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1710 struct ath_tx_control *txctl)
1711{
1712 struct sk_buff *skb = bf->bf_mpdu;
1713 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001715 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001716 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001717 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301718
Sujithe8324352009-01-16 21:38:42 +05301719 spin_lock_bh(&txctl->txq->axq_lock);
1720
Felix Fietkau248a38d2010-12-10 21:16:46 +01001721 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001722 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1723 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001725
Felix Fietkau066dae92010-11-07 14:59:39 +01001726 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001727 }
1728
1729 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001730 /*
1731 * Try aggregation if it's a unicast data frame
1732 * and the destination is HT capable.
1733 */
1734 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301735 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001736 INIT_LIST_HEAD(&bf_head);
1737 list_add_tail(&bf->list, &bf_head);
1738
Felix Fietkau61117f02010-11-11 03:18:36 +01001739 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001740 bf->bf_state.bfs_paprd = txctl->paprd;
1741
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001742 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001743 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1744 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001745
Felix Fietkau248a38d2010-12-10 21:16:46 +01001746 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301747 }
1748
1749 spin_unlock_bh(&txctl->txq->axq_lock);
1750}
1751
1752/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001753int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301754 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001755{
Felix Fietkau28d16702010-11-14 15:20:10 +01001756 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1757 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001758 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001759 struct ath_wiphy *aphy = hw->priv;
1760 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001761 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001762 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001763 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001764 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001765 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001766
Ben Greeara9927ba2010-12-06 21:13:49 -08001767 /* NOTE: sta can be NULL according to net/mac80211.h */
1768 if (sta)
1769 txctl->an = (struct ath_node *)sta->drv_priv;
1770
Felix Fietkau04caf862010-11-14 15:20:12 +01001771 if (info->control.hw_key)
1772 frmlen += info->control.hw_key->icv_len;
1773
Felix Fietkau28d16702010-11-14 15:20:10 +01001774 /*
1775 * As a temporary workaround, assign seq# here; this will likely need
1776 * to be cleaned up to work better with Beacon transmission and virtual
1777 * BSSes.
1778 */
1779 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1780 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1781 sc->tx.seq_no += 0x10;
1782 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1783 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1784 }
1785
1786 /* Add the padding after the header if this is not already done */
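	/* For example, a 26-byte QoS data header yields padsize = 2, so the
	 * frame body ends up 4-byte aligned as the hardware expects.
	 */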
1787 padpos = ath9k_cmn_padpos(hdr->frame_control);
1788 padsize = padpos & 3;
1789 if (padsize && skb->len > padpos) {
1790 if (skb_headroom(skb) < padsize)
1791 return -ENOMEM;
1792
1793 skb_push(skb, padsize);
1794 memmove(skb->data, skb->data + padsize, padpos);
1795 }
1796
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001797 setup_frame_info(hw, skb, frmlen);
1798
1799 /*
1800 * At this point, the vif, hw_key and sta pointers in the tx control
1801	 * info are no longer valid (overwritten by the ath_frame_info data).
1802 */
1803
1804 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001805 if (unlikely(!bf))
1806 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001807
Felix Fietkau066dae92010-11-07 14:59:39 +01001808 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001809 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001810 if (txq == sc->tx.txq_map[q] &&
1811 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1812 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001813 txq->stopped = 1;
1814 }
1815 spin_unlock_bh(&txq->axq_lock);
1816
Sujithe8324352009-01-16 21:38:42 +05301817 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001818
1819 return 0;
1820}
1821
Sujithe8324352009-01-16 21:38:42 +05301822/*****************/
1823/* TX Completion */
1824/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001825
Sujithe8324352009-01-16 21:38:42 +05301826static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001827 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001828 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829{
Sujithe8324352009-01-16 21:38:42 +05301830 struct ieee80211_hw *hw = sc->hw;
1831 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001832 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001833	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001834 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301835
Joe Perches226afe62010-12-02 19:12:37 -08001836 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301837
Felix Fietkau827e69b2009-11-15 23:09:25 +01001838 if (aphy)
1839 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301840
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301841 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301842 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301843
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301844 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301845 /* Frame was ACKed */
1846 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1847 }
1848
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001849 padpos = ath9k_cmn_padpos(hdr->frame_control);
1850 padsize = padpos & 3;
1851	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301852 /*
1853 * Remove MAC header padding before giving the frame back to
1854 * mac80211.
1855 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001856 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301857 skb_pull(skb, padsize);
1858 }
1859
Sujith1b04b932010-01-08 10:36:05 +05301860 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1861 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001862 ath_dbg(common, ATH_DBG_PS,
1863 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301864 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1865 PS_WAIT_FOR_CAB |
1866 PS_WAIT_FOR_PSPOLL_DATA |
1867 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001868 }
1869
Felix Fietkau61117f02010-11-11 03:18:36 +01001870 if (unlikely(ftype))
1871 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001872 else {
1873 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001874 if (txq == sc->tx.txq_map[q]) {
1875 spin_lock_bh(&txq->axq_lock);
1876 if (WARN_ON(--txq->pending_frames < 0))
1877 txq->pending_frames = 0;
1878 spin_unlock_bh(&txq->axq_lock);
1879 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001880
Felix Fietkau827e69b2009-11-15 23:09:25 +01001881 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001882 }
Sujithe8324352009-01-16 21:38:42 +05301883}
1884
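/* Tear down a completed frame: unmap its DMA buffer, hand the skb to
 * ath_tx_complete() (or to the PAPRD calibration path), and return the
 * ath_buf chain to the free list.
 */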
1885static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001886 struct ath_txq *txq, struct list_head *bf_q,
1887 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301888{
1889 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301890 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301891 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301892
Sujithe8324352009-01-16 21:38:42 +05301893 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301894 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301895
1896 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301897 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301898
1899 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301900 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301901 }
1902
Ben Greearc1739eb32010-10-14 12:45:29 -07001903 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001904 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001905
1906 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001907 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001908 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001909 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001910 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001911 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001912 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001913 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1914 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001915 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001916 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1917 * accidentally reference it later.
1918 */
1919 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301920
1921 /*
1922 * Return the list of ath_buf of this mpdu to free queue
1923 */
1924 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1925 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1926 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1927}
1928
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001929static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001930 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301931{
Sujitha22be222009-03-30 15:28:36 +05301932 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301933 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301934 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001935 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001936 struct ath_softc *sc = bf->aphy->sc;
1937 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301938 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301939
Sujith95e4acb2009-03-13 08:56:09 +05301940 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001941 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301942
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001943 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301944 WARN_ON(tx_rateindex >= hw->max_rates);
1945
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001946 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301947 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001948 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001949 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301950
Felix Fietkaub572d032010-11-14 15:20:07 +01001951 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001952
Felix Fietkaub572d032010-11-14 15:20:07 +01001953 tx_info->status.ampdu_len = nframes;
1954 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001955 }
1956
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001957 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301958 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001959 /*
1960	 * If an underrun error is seen, treat it as an excessive
1961	 * retry only if the max frame trigger level has been reached
1962 * (2 KB for single stream, and 4 KB for dual stream).
1963 * Adjust the long retry as if the frame was tried
1964 * hw->max_rate_tries times to affect how rate control updates
1965 * PER for the failed rate.
1966	 * In case of congestion on the bus, penalizing this type of
1967	 * underrun should help the hardware actually transmit new frames
1968 * successfully by eventually preferring slower rates.
1969 * This itself should also alleviate congestion on the bus.
1970 */
1971 if (ieee80211_is_data(hdr->frame_control) &&
1972 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1973 ATH9K_TX_DELIM_UNDERRUN)) &&
1974 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1975 tx_info->status.rates[tx_rateindex].count =
1976 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301977 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301978
Felix Fietkau545750d2009-11-23 22:21:01 +01001979 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301980 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001981 tx_info->status.rates[i].idx = -1;
1982 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301983
Felix Fietkau78c46532010-06-25 01:26:16 +02001984 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301985}
1986
Ben Greear60f2d1d2011-01-09 23:11:52 -08001987/* Does no locking of its own; the caller must hold
1988 * spin_lock_bh(&txq->axq_lock) before calling this.
1989 */
1990static void __ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
Sujith059d8062009-01-16 21:38:49 +05301991{
Ben Greear60f2d1d2011-01-09 23:11:52 -08001992 if (txq->mac80211_qnum >= 0 &&
1993 txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1994 if (ath_mac80211_start_queue(sc, txq->mac80211_qnum))
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001995 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301996 }
Sujith059d8062009-01-16 21:38:49 +05301997}
1998
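/* Reap completed frames from a legacy (non-EDMA) hardware queue: walk
 * axq_q, poll the tx status in each chain's last descriptor and hand
 * finished buffers to the aggregate or single-frame completion path.
 */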
Sujithc4288392008-11-18 09:09:30 +05301999static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000{
Sujithcbe61d82009-02-09 13:27:12 +05302001 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002002 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002003 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2004 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302005 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002006 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302007 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002008 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01002009 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002010
Joe Perches226afe62010-12-02 19:12:37 -08002011 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2012 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2013 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002014
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002015 for (;;) {
2016 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002017 if (list_empty(&txq->axq_q)) {
2018 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002019 if (sc->sc_flags & SC_OP_TXAGGR)
2020 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002021 spin_unlock_bh(&txq->axq_lock);
2022 break;
2023 }
2024 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2025
2026 /*
2027	 * There is a race condition in which a BH gets scheduled
2028	 * after sw writes TxE and before hw re-loads the last
2029 * descriptor to get the newly chained one.
2030 * Software must keep the last DONE descriptor as a
2031 * holding descriptor - software does so by marking
2032 * it with the STALE flag.
2033 */
2034 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302035 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002036 bf_held = bf;
2037 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302038 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002039 break;
2040 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002041 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302042 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 }
2044 }
2045
2046 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302047 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048
Felix Fietkau29bffa92010-03-29 20:14:23 -07002049 memset(&ts, 0, sizeof(ts));
2050 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002051 if (status == -EINPROGRESS) {
2052 spin_unlock_bh(&txq->axq_lock);
2053 break;
2054 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002055 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056
2057 /*
2058 * Remove ath_buf's of the same transmit unit from txq,
2059	 * but leave the last descriptor behind as the holding
2060 * descriptor for hw.
2061 */
Sujitha119cc42009-03-30 15:28:38 +05302062 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002064 if (!list_is_singular(&lastbf->list))
2065 list_cut_position(&bf_head,
2066 &txq->axq_q, lastbf->list.prev);
2067
2068 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002069 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002070 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002071 if (bf_held)
2072 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002073
2074 if (bf_is_ampdu_not_probing(bf))
2075 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 spin_unlock_bh(&txq->axq_lock);
2077
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002078 if (bf_held)
2079 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080
Sujithcd3d39a2008-08-11 14:03:34 +05302081 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 /*
2083 * This frame is sent out as a single frame.
2084 * Use hardware retry status for this frame.
2085 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002086 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302087 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002088 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 }
Johannes Berge6a98542008-10-21 12:40:02 +02002090
Felix Fietkau066dae92010-11-07 14:59:39 +01002091 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2092
Sujithcd3d39a2008-08-11 14:03:34 +05302093 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002094 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2095 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002096 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002097 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002098
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002100 __ath_wake_mac80211_queue(sc, txq);
2101
Sujith672840a2008-08-11 14:05:08 +05302102 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103 ath_txq_schedule(sc, txq);
2104 spin_unlock_bh(&txq->axq_lock);
2105 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106}
2107
Sujith305fe472009-07-23 15:32:29 +05302108static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002109{
2110 struct ath_softc *sc = container_of(work, struct ath_softc,
2111 tx_complete_work.work);
2112 struct ath_txq *txq;
2113 int i;
2114 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002115#ifdef CONFIG_ATH9K_DEBUGFS
2116 sc->tx_complete_poll_work_seen++;
2117#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002118
2119 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2120 if (ATH_TXQ_SETUP(sc, i)) {
2121 txq = &sc->tx.txq[i];
2122 spin_lock_bh(&txq->axq_lock);
2123 if (txq->axq_depth) {
2124 if (txq->axq_tx_inprogress) {
2125 needreset = true;
2126 spin_unlock_bh(&txq->axq_lock);
2127 break;
2128 } else {
2129 txq->axq_tx_inprogress = true;
2130 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002131 } else {
2132 /* If the queue has pending buffers, then it
2133 * should be doing tx work (and have axq_depth).
2134				 * Shouldn't get to this state, I think... but
2135				 * we do.
2136 */
2137 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2138 (txq->pending_frames > 0 ||
2139 !list_empty(&txq->axq_acq) ||
2140 txq->stopped)) {
2141 ath_err(ath9k_hw_common(sc->sc_ah),
2142 "txq: %p axq_qnum: %u,"
2143 " mac80211_qnum: %i"
2144 " axq_link: %p"
2145 " pending frames: %i"
2146 " axq_acq empty: %i"
2147 " stopped: %i"
2148 " axq_depth: 0 Attempting to"
2149 " restart tx logic.\n",
2150 txq, txq->axq_qnum,
2151 txq->mac80211_qnum,
2152 txq->axq_link,
2153 txq->pending_frames,
2154 list_empty(&txq->axq_acq),
2155 txq->stopped);
2156 __ath_wake_mac80211_queue(sc, txq);
2157 ath_txq_schedule(sc, txq);
2158 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002159 }
2160 spin_unlock_bh(&txq->axq_lock);
2161 }
2162
2163 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002164 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2165 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302166 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002167 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302168 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002169 }
2170
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002171 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002172 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2173}
2174
2175
Sujithe8324352009-01-16 21:38:42 +05302176
2177void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178{
Sujithe8324352009-01-16 21:38:42 +05302179 int i;
2180 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181
Sujithe8324352009-01-16 21:38:42 +05302182 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183
2184 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302185 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2186 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002187 }
2188}
2189
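/* EDMA completion path: tx status entries arrive in a separate status ring,
 * so pop them via ath9k_hw_txprocdesc() and match each one against the head
 * of the per-queue tx FIFO.
 */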
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002190void ath_tx_edma_tasklet(struct ath_softc *sc)
2191{
2192 struct ath_tx_status txs;
2193 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2194 struct ath_hw *ah = sc->sc_ah;
2195 struct ath_txq *txq;
2196 struct ath_buf *bf, *lastbf;
2197 struct list_head bf_head;
2198 int status;
2199 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002200 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002201
2202 for (;;) {
2203 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2204 if (status == -EINPROGRESS)
2205 break;
2206 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002207 ath_dbg(common, ATH_DBG_XMIT,
2208 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002209 break;
2210 }
2211
2212 /* Skip beacon completions */
2213 if (txs.qid == sc->beacon.beaconq)
2214 continue;
2215
2216 txq = &sc->tx.txq[txs.qid];
2217
2218 spin_lock_bh(&txq->axq_lock);
2219 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2220 spin_unlock_bh(&txq->axq_lock);
2221 return;
2222 }
2223
2224 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2225 struct ath_buf, list);
2226 lastbf = bf->bf_lastbf;
2227
2228 INIT_LIST_HEAD(&bf_head);
2229 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2230 &lastbf->list);
2231 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2232 txq->axq_depth--;
2233 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002234 if (bf_is_ampdu_not_probing(bf))
2235 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236 spin_unlock_bh(&txq->axq_lock);
2237
2238 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2239
2240 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002241 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2242 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002243 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002244 }
2245
Felix Fietkau066dae92010-11-07 14:59:39 +01002246 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2247
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002249 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2250 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 else
2252 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2253 &txs, txok, 0);
2254
2255 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002256 __ath_wake_mac80211_queue(sc, txq);
2257
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002258 if (!list_empty(&txq->txq_fifo_pending)) {
2259 INIT_LIST_HEAD(&bf_head);
2260 bf = list_first_entry(&txq->txq_fifo_pending,
2261 struct ath_buf, list);
2262 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2263 &bf->bf_lastbf->list);
2264 ath_tx_txqaddbuf(sc, txq, &bf_head);
2265 } else if (sc->sc_flags & SC_OP_TXAGGR)
2266 ath_txq_schedule(sc, txq);
2267 spin_unlock_bh(&txq->axq_lock);
2268 }
2269}
2270
Sujithe8324352009-01-16 21:38:42 +05302271/*****************/
2272/* Init, Cleanup */
2273/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002274
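/* Allocate the DMA-coherent ring that EDMA hardware writes tx status
 * entries into; sized as size * txs_len bytes.
 */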
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002275static int ath_txstatus_setup(struct ath_softc *sc, int size)
2276{
2277 struct ath_descdma *dd = &sc->txsdma;
2278 u8 txs_len = sc->sc_ah->caps.txs_len;
2279
2280 dd->dd_desc_len = size * txs_len;
2281 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2282 &dd->dd_desc_paddr, GFP_KERNEL);
2283 if (!dd->dd_desc)
2284 return -ENOMEM;
2285
2286 return 0;
2287}
2288
2289static int ath_tx_edma_init(struct ath_softc *sc)
2290{
2291 int err;
2292
2293 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2294 if (!err)
2295 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2296 sc->txsdma.dd_desc_paddr,
2297 ATH_TXSTATUS_RING_SIZE);
2298
2299 return err;
2300}
2301
2302static void ath_tx_edma_cleanup(struct ath_softc *sc)
2303{
2304 struct ath_descdma *dd = &sc->txsdma;
2305
2306 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2307 dd->dd_desc_paddr);
2308}
2309
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002310int ath_tx_init(struct ath_softc *sc, int nbufs)
2311{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002312 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313 int error = 0;
2314
Sujith797fe5cb2009-03-30 15:28:45 +05302315 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
Sujith797fe5cb2009-03-30 15:28:45 +05302317 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002318 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302319 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002320 ath_err(common,
2321 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302322 goto err;
2323 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002324
Sujith797fe5cb2009-03-30 15:28:45 +05302325 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002326 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302327 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002328 ath_err(common,
2329 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302330 goto err;
2331 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002332
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002333 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2334
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002335 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2336 error = ath_tx_edma_init(sc);
2337 if (error)
2338 goto err;
2339 }
2340
Sujith797fe5cb2009-03-30 15:28:45 +05302341err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342 if (error != 0)
2343 ath_tx_cleanup(sc);
2344
2345 return error;
2346}
2347
Sujith797fe5cb2009-03-30 15:28:45 +05302348void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349{
Sujithb77f4832008-12-07 21:44:03 +05302350 if (sc->beacon.bdma.dd_desc_len != 0)
2351 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Sujithb77f4832008-12-07 21:44:03 +05302353 if (sc->tx.txdma.dd_desc_len != 0)
2354 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002355
2356 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2357 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002358}
2359
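/* Initialize per-station aggregation state: one ath_atx_tid per TID and one
 * ath_atx_ac per WME access category, with each TID linked to its AC and
 * each AC bound to the matching hardware tx queue.
 */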
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2361{
Sujithc5170162008-10-29 10:13:59 +05302362 struct ath_atx_tid *tid;
2363 struct ath_atx_ac *ac;
2364 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365
Sujith8ee5afb2008-12-07 21:43:36 +05302366 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302367 tidno < WME_NUM_TID;
2368 tidno++, tid++) {
2369 tid->an = an;
2370 tid->tidno = tidno;
2371 tid->seq_start = tid->seq_next = 0;
2372 tid->baw_size = WME_MAX_BA;
2373 tid->baw_head = tid->baw_tail = 0;
2374 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302375 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302376 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302377 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302378 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302379 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302380 tid->state &= ~AGGR_ADDBA_COMPLETE;
2381 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302382 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383
Sujith8ee5afb2008-12-07 21:43:36 +05302384 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302385 acno < WME_NUM_AC; acno++, ac++) {
2386 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002387 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302388 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389 }
2390}
2391
Sujithb5aa9bf2008-10-29 10:13:31 +05302392void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393{
Felix Fietkau2b409942010-07-07 19:42:08 +02002394 struct ath_atx_ac *ac;
2395 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002396 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002397 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302398
Felix Fietkau2b409942010-07-07 19:42:08 +02002399 for (tidno = 0, tid = &an->tid[tidno];
2400 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Felix Fietkau2b409942010-07-07 19:42:08 +02002402 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002403 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404
Felix Fietkau2b409942010-07-07 19:42:08 +02002405 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002406
Felix Fietkau2b409942010-07-07 19:42:08 +02002407 if (tid->sched) {
2408 list_del(&tid->list);
2409 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002411
2412 if (ac->sched) {
2413 list_del(&ac->list);
2414 tid->ac->sched = false;
2415 }
2416
2417 ath_tid_drain(sc, txq, tid);
2418 tid->state &= ~AGGR_ADDBA_COMPLETE;
2419 tid->state &= ~AGGR_CLEANUP;
2420
2421 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422 }
2423}