/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
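
/*
 * Note on the timing macros above: an HT OFDM symbol lasts 4 us with the
 * regular guard interval and 3.6 us with the short GI.  The half-GI macros
 * do the 3.6 us math in integer arithmetic (x * 3.6 == x * 18 / 5),
 * rounding symbol durations up and symbol counts down so the estimates
 * stay conservative.
 */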

#define OFDM_SIFS_TIME              16

static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
};
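
/*
 * bits_per_symbol[mcs % 8][40mhz?] gives the data bits carried by one OFDM
 * symbol for a single spatial stream; callers multiply by the stream count
 * (HT_RC_2_STREAMS) for the higher MCS indices.  Example: MCS0 at 20 MHz
 * carries 26 bits per 4 us symbol, i.e. 6.5 Mbit/s.
 */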

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
                6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
                9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
                12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
                7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
                10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
                14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
                13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
                20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
                26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
                14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
                22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
                29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
        }
};
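
/*
 * ath_max_4ms_framelen[mode][mcs] is the largest frame length (in bytes)
 * that still fits in a roughly 4 ms transmit duration at that MCS; values
 * are clamped to 65532, just under the 16-bit limit the hardware accepts.
 * ath_lookup_rate() uses it to bound the size of the aggregates it forms.
 */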
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
Sujithe8324352009-01-16 21:38:42 +0530123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124{
Felix Fietkau066dae92010-11-07 14:59:39 +0100125 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530126
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200127 WARN_ON(!tid->paused);
128
Sujithe8324352009-01-16 21:38:42 +0530129 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200130 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530131
132 if (list_empty(&tid->buf_q))
133 goto unlock;
134
135 ath_tx_queue_tid(txq, tid);
136 ath_txq_schedule(sc, txq);
137unlock:
138 spin_unlock_bh(&txq->axq_lock);
139}
140
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100147}
148
Sujithe8324352009-01-16 21:38:42 +0530149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{
Felix Fietkau066dae92010-11-07 14:59:39 +0100151 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530152 struct ath_buf *bf;
153 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200154 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100155 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200156
Sujithe8324352009-01-16 21:38:42 +0530157 INIT_LIST_HEAD(&bf_head);
158
Felix Fietkau90fa5392010-09-20 13:45:38 +0200159 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530160 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530161
162 while (!list_empty(&tid->buf_q)) {
163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530164 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200165
Felix Fietkaue1566d12010-11-20 03:08:46 +0100166 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
171 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700172 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200173 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100174 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530175 }
176
177 spin_unlock_bh(&txq->axq_lock);
178}
179
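/*
 * The per-TID block-ack window is tracked in tid->tx_buf, a circular bitmap
 * of ATH_TID_MAX_BUFS entries: ATH_BA_INDEX() gives a frame's offset from
 * seq_start, and (baw_head + offset) modulo the bitmap size is the slot that
 * marks the frame as outstanding.  Completing the oldest outstanding frame
 * lets seq_start (and thus the window) slide forward.
 */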
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)

{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries)
                        ath_tx_update_baw(sc, tid, fi->seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        if (fi->retries++ > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        bf->bf_state.bf_type |= BUF_XRETRY;
                        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_rc_status(bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset,
                         * but the AP code may have synchronization
                         * issues when performing an internal reset
                         * in this routine. Only enable the reset in
                         * STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) && retry) {
                                if (fi->retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * is not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid, fi->seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
                                                ath_tx_rc_status(bf, ts, nframes,
                                                                 nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 0);
                                                break;
                                        }

                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             tbf->bf_desc);
                                        list_add_tail(&tbf->list, &bf_head);
                                } else {
                                        /*
                                         * Clear descriptor status words for
                                         * software retry
                                         */
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             bf->bf_desc);
                                }
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                ath_tx_flush_tid(sc, tid);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;
                }
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

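/*
 * Pick the aggregate size limit for this TID: the smallest per-rate 4 ms
 * frame length across the configured rate series (see ath_max_4ms_framelen),
 * further capped by ATH_AMPDU_LIMIT_MAX and the peer's advertised maximum
 * A-MPDU length (tid->an->maxampdu).
 */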
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet altogether.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * The hardware can accept aggregates up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by the hardware.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates.
         */
        if (fi->keyix != ATH9K_TXKEYIX_INVALID)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert the desired MPDU density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum subframe length. Take into account
         * whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no MPDU density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}

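/*
 * Pull frames off tid->buf_q and link them into one aggregate.  Formation
 * stops when the block-ack window would be overstepped, the byte limit from
 * ath_lookup_rate() would be exceeded, a rate-control probe frame is hit, or
 * the subframe count reaches min(baw_size / 2, ATH_AMPDU_SUBFRAME_DEFAULT).
 */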
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                fi = get_frame_info(bf->bf_mpdu);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
                    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, fi->seqno);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
                                               bf->bf_daddr);
                }
                bf_prev = bf;

        } while (!list_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ath_frame_info *fi;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        fi = get_frame_info(bf->bf_mpdu);

                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf, fi->framelen);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf, aggr_len);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise we wait for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load, and it only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is that if the tx queue backs
         * up, the top half of the kernel may also back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
                                TXQ_FLAG_TXERRINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_err(common, "qnum %u out of range, max %zu!\n",
                        qnum, ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
        return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                                txq->txq_headidx = txq->txq_tailidx = 0;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                                      struct ath_buf, list);
                        }
                } else {
                        if (list_empty(&txq->axq_q)) {
                                txq->axq_link = NULL;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        }
                        bf = list_first_entry(&txq->axq_q, struct ath_buf,
                                              list);

                        if (bf->bf_stale) {
                                list_del(&bf->list);
                                spin_unlock_bh(&txq->axq_lock);

                                ath_tx_return_buffer(sc, bf);
                                continue;
                        }
                }

                lastbf = bf->bf_lastbf;

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo[txq->txq_tailidx],
                                          &lastbf->list);
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
                } else {
                        /* remove ath_buf's of the same mpdu from txq */
                        list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                }

                txq->axq_depth--;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                spin_lock_bh(&txq->axq_lock);
                while (!list_empty(&txq->txq_fifo_pending)) {
                        bf = list_first_entry(&txq->txq_fifo_pending,
                                              struct ath_buf, list);
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo_pending,
                                          &bf->bf_lastbf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        if (bf_isampdu(bf))
                                ath_tx_complete_aggr(sc, txq, bf, &bf_head,
                                                     &ts, 0, retry_tx);
                        else
                                ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                                    &ts, 0, 0);
                        spin_lock_bh(&txq->axq_lock);
                }
                spin_unlock_bh(&txq->axq_lock);
        }

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return true;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend)
                ath_err(common, "Failed to stop TX DMA!\n");

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }

        return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

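/*
 * Service the first access category queued on this hardware queue: each of
 * its pending TIDs gets a chance to form and submit aggregates, until every
 * TID has been visited once or the queue already holds ATH_AGGR_MIN_QDEPTH
 * A-MPDUs.  If the AC still has work, it is requeued at the tail, which
 * round-robins the access categories across calls.
 */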
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid, *last;

        if (list_empty(&txq->axq_acq) ||
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                if (tid == last || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                        break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        ath_dbg(common, ATH_DBG_QUEUE,
                "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
                        list_splice_tail_init(head, &txq->txq_fifo_pending);
                        return;
                }
                if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
                        ath_dbg(common, ATH_DBG_XMIT,
                                "Initializing tx fifo %d which is non-empty\n",
                                txq->txq_headidx);
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link == NULL) {
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                                txq->axq_qnum, ito64(bf->bf_daddr),
                                bf->bf_desc);
                } else {
                        *txq->axq_link = bf->bf_daddr;
                        ath_dbg(common, ATH_DBG_XMIT,
                                "link[%u] (%p)=%llx (%p)\n",
                                txq->axq_qnum, txq->axq_link,
                                ito64(bf->bf_daddr), bf->bf_desc);
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct ath_buf *bf, struct ath_tx_control *txctl)
{
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        struct list_head bf_head;

        bf->bf_state.bf_type |= BUF_AMPDU;

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
            txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                list_add_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        INIT_LIST_HEAD(&bf_head);
        list_add(&bf->list, &bf_head);

        /* Add sub-frame to BAW */
        if (!fi->retries)
                ath_tx_addto_baw(sc, tid, fi->seqno);

        /* Queue to h/w without aggregation */
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001375 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001376 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301377}
1378
Felix Fietkau82b873a2010-11-11 03:18:37 +01001379static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1380 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001381 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001382{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001383 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301384 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001385
Sujithe8324352009-01-16 21:38:42 +05301386 bf = list_first_entry(bf_head, struct ath_buf, list);
1387 bf->bf_state.bf_type &= ~BUF_AMPDU;
1388
1389 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001390 if (tid)
1391 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301392
Sujithd43f30152009-01-16 21:38:53 +05301393 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001394 fi = get_frame_info(bf->bf_mpdu);
1395 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301396 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301397 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001398}
1399
Sujith528f0c62008-10-29 10:14:26 +05301400static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001401{
Sujith528f0c62008-10-29 10:14:26 +05301402 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001403 enum ath9k_pkt_type htype;
1404 __le16 fc;
1405
Sujith528f0c62008-10-29 10:14:26 +05301406 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001407 fc = hdr->frame_control;
1408
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001409 if (ieee80211_is_beacon(fc))
1410 htype = ATH9K_PKT_TYPE_BEACON;
1411 else if (ieee80211_is_probe_resp(fc))
1412 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1413 else if (ieee80211_is_atim(fc))
1414 htype = ATH9K_PKT_TYPE_ATIM;
1415 else if (ieee80211_is_pspoll(fc))
1416 htype = ATH9K_PKT_TYPE_PSPOLL;
1417 else
1418 htype = ATH9K_PKT_TYPE_NORMAL;
1419
1420 return htype;
1421}
1422
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001423static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1424 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301425{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001426 struct ath_wiphy *aphy = hw->priv;
1427 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301428 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001429 struct ieee80211_sta *sta = tx_info->control.sta;
1430 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301431 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001432 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301433 struct ath_node *an;
1434 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001435 enum ath9k_key_type keytype;
1436 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001437 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301438
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001439 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301440
Sujith528f0c62008-10-29 10:14:26 +05301441 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001442 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1443 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001444
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001445 an = (struct ath_node *) sta->drv_priv;
1446 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1447
1448 /*
1449 * Override seqno set by upper layer with the one
1450 * in tx aggregation state.
1451 */
1452 tid = ATH_AN_2_TID(an, tidno);
1453 seqno = tid->seq_next;
1454 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1455 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1456 }
1457
1458 memset(fi, 0, sizeof(*fi));
1459 if (hw_key)
1460 fi->keyix = hw_key->hw_key_idx;
1461 else
1462 fi->keyix = ATH9K_TXKEYIX_INVALID;
1463 fi->keytype = keytype;
1464 fi->framelen = framelen;
1465 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301466}
1467
Felix Fietkau82b873a2010-11-11 03:18:37 +01001468static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301469{
1470 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1471 int flags = 0;
1472
1473 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1474 flags |= ATH9K_TXDESC_INTREQ;
1475
1476 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1477 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301478
Felix Fietkau82b873a2010-11-11 03:18:37 +01001479 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001480 flags |= ATH9K_TXDESC_LDPC;
1481
Sujith528f0c62008-10-29 10:14:26 +05301482 return flags;
1483}
1484
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001485/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486 * rix - rate index
1487 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1488 * width - 0 for 20 MHz, 1 for 40 MHz
1489 * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
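 * For example, with rix = 7 (MCS7, one stream), pktlen = 1500,
 * width = 1 (40 MHz) and half_gi = 0: nsymbits = 540 and
 * nsymbols = (12022 + 539) / 540 = 23, giving 23 * 4 = 92 us for the
 * data plus 36 us of legacy/HT training and signal fields, 128 us total.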
1490 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001491static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301492 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001493{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001494 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001495 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301496
1497 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001498 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001499 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001500 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1502
1503 if (!half_gi)
1504 duration = SYMBOL_TIME(nsymbols);
1505 else
1506 duration = SYMBOL_TIME_HALFGI(nsymbols);
1507
Sujithe63835b2008-11-18 09:07:53 +05301508 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001509 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301510
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001511 return duration;
1512}
1513
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301514u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1515{
1516 struct ath_hw *ah = sc->sc_ah;
1517 struct ath9k_channel *curchan = ah->curchan;
1518 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1519 (curchan->channelFlags & CHANNEL_5GHZ) &&
1520 (chainmask == 0x7) && (rate < 0x90))
1521 return 0x3;
1522 else
1523 return chainmask;
1524}
1525
Felix Fietkau269c44b2010-11-14 15:20:06 +01001526static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001527{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001528 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001529 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301530 struct sk_buff *skb;
1531 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301532 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001533 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301534 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301535 int i, flags = 0;
1536 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301537 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301538
1539 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301540
Sujitha22be222009-03-30 15:28:36 +05301541 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301542 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301543 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301544 hdr = (struct ieee80211_hdr *)skb->data;
1545 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301546
Sujithc89424d2009-01-30 14:29:28 +05301547 /*
1548 * We check if Short Preamble is needed for the CTS rate by
1549 * checking the BSS's global flag.
1550 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1551 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001552 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1553 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301554 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001555 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001556
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001557 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001558 bool is_40, is_sgi, is_sp;
1559 int phy;
1560
Sujithe63835b2008-11-18 09:07:53 +05301561 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001562 continue;
1563
Sujitha8efee42008-11-18 09:07:30 +05301564 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301565 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001566
Felix Fietkau27032052010-01-17 21:08:50 +01001567 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1568 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301569 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001570 flags |= ATH9K_TXDESC_RTSENA;
1571 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1572 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1573 flags |= ATH9K_TXDESC_CTSENA;
1574 }
1575
Sujithc89424d2009-01-30 14:29:28 +05301576 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1577 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1578 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1579 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580
Felix Fietkau545750d2009-11-23 22:21:01 +01001581 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1582 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1583 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1584
1585 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1586 /* MCS rates */
1587 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301588 series[i].ChSel = ath_txchainmask_reduction(sc,
1589 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001590 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001591 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001592 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1593 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001594 continue;
1595 }
1596
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301597 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001598 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1599 !(rate->flags & IEEE80211_RATE_ERP_G))
1600 phy = WLAN_RC_PHY_CCK;
1601 else
1602 phy = WLAN_RC_PHY_OFDM;
1603
1604 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1605 series[i].Rate = rate->hw_value;
1606 if (rate->hw_value_short) {
1607 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1608 series[i].Rate |= rate->hw_value_short;
1609 } else {
1610 is_sp = false;
1611 }
1612
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301613 if (bf->bf_state.bfs_paprd)
1614 series[i].ChSel = common->tx_chainmask;
1615 else
1616 series[i].ChSel = ath_txchainmask_reduction(sc,
1617 common->tx_chainmask, series[i].Rate);
1618
Felix Fietkau545750d2009-11-23 22:21:01 +01001619 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001620 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001621 }
1622
Felix Fietkau27032052010-01-17 21:08:50 +01001623 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001624 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001625 flags &= ~ATH9K_TXDESC_RTSENA;
1626
1627 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1628 if (flags & ATH9K_TXDESC_RTSENA)
1629 flags &= ~ATH9K_TXDESC_CTSENA;
1630
Sujithe63835b2008-11-18 09:07:53 +05301631 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301632 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1633 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301634 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301635 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301636
Sujith17d79042009-02-09 13:27:03 +05301637 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301638 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001639}
1640
Felix Fietkau82b873a2010-11-11 03:18:37 +01001641static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001642 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001643 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301644{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001645 struct ath_wiphy *aphy = hw->priv;
1646 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001647 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001648 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001649 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001650 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001651 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001652 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001653
1654 bf = ath_tx_get_buffer(sc);
1655 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001656 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001657 return NULL;
1658 }
Sujithe8324352009-01-16 21:38:42 +05301659
Sujithe8324352009-01-16 21:38:42 +05301660 ATH_TXBUF_RESET(bf);
1661
Felix Fietkau827e69b2009-11-15 23:09:25 +01001662 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001663 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301664 bf->bf_mpdu = skb;
1665
Ben Greearc1739eb32010-10-14 12:45:29 -07001666 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1667 skb->len, DMA_TO_DEVICE);
1668 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301669 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001670 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001671 ath_err(ath9k_hw_common(sc->sc_ah),
1672 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001673 ath_tx_return_buffer(sc, bf);
1674 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301675 }
1676
Sujithe8324352009-01-16 21:38:42 +05301677 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301678
1679 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001680 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301681
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001682 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1683 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301684
1685 ath9k_hw_filltxdesc(ah, ds,
1686 skb->len, /* segment length */
1687 true, /* first segment */
1688 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001689 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001690 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001691 txq->axq_qnum);
1692
1693
1694 return bf;
1695}
1696
1697/* FIXME: tx power */
1698static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1699 struct ath_tx_control *txctl)
1700{
1701 struct sk_buff *skb = bf->bf_mpdu;
1702 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1703 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001704 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001705 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001706 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301707
Sujithe8324352009-01-16 21:38:42 +05301708 spin_lock_bh(&txctl->txq->axq_lock);
1709
Felix Fietkau248a38d2010-12-10 21:16:46 +01001710 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001711 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1712 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001713 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001714
Felix Fietkau066dae92010-11-07 14:59:39 +01001715 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001716 }
1717
1718 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001719 /*
1720 * Try aggregation if it's a unicast data frame
1721 * and the destination is HT capable.
1722 */
1723 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301724 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 INIT_LIST_HEAD(&bf_head);
1726 list_add_tail(&bf->list, &bf_head);
1727
Felix Fietkau61117f02010-11-11 03:18:36 +01001728 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001729 bf->bf_state.bfs_paprd = txctl->paprd;
1730
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001731 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001732 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1733 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001734
Felix Fietkau248a38d2010-12-10 21:16:46 +01001735 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301736 }
1737
1738 spin_unlock_bh(&txctl->txq->axq_lock);
1739}
1740
1741/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001742int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301743 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001744{
Felix Fietkau28d16702010-11-14 15:20:10 +01001745 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1746 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001747 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001748 struct ath_wiphy *aphy = hw->priv;
1749 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001750 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001751 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001752 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001753 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001754 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001755
Ben Greeara9927ba2010-12-06 21:13:49 -08001756 /* NOTE: sta can be NULL according to net/mac80211.h */
1757 if (sta)
1758 txctl->an = (struct ath_node *)sta->drv_priv;
1759
Felix Fietkau04caf862010-11-14 15:20:12 +01001760 if (info->control.hw_key)
1761 frmlen += info->control.hw_key->icv_len;
1762
Felix Fietkau28d16702010-11-14 15:20:10 +01001763 /*
1764 * As a temporary workaround, assign seq# here; this will likely need
1765 * to be cleaned up to work better with Beacon transmission and virtual
1766 * BSSes.
1767 */
1768 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1769 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1770 sc->tx.seq_no += 0x10;
1771 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1772 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1773 }
1774
1775 /* Add the padding after the header if this is not already done */
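	/*
	 * e.g. a 3-address QoS data header is 26 bytes, so padsize is
	 * 26 & 3 = 2; the skb is pushed by two bytes and the header moved
	 * up, leaving the two pad bytes between the header and the payload.
	 */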
1776 padpos = ath9k_cmn_padpos(hdr->frame_control);
1777 padsize = padpos & 3;
1778 if (padsize && skb->len > padpos) {
1779 if (skb_headroom(skb) < padsize)
1780 return -ENOMEM;
1781
1782 skb_push(skb, padsize);
1783 memmove(skb->data, skb->data + padsize, padpos);
1784 }
1785
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001786 setup_frame_info(hw, skb, frmlen);
1787
1788 /*
1789 * At this point, the vif, hw_key and sta pointers in the tx control
1790 * info are no longer valid (overwritten by the ath_frame_info data).
1791 */
1792
1793 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001794 if (unlikely(!bf))
1795 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001796
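	/*
	 * Flow control: once more than ATH_MAX_QDEPTH frames are pending
	 * on this hardware queue, stop the corresponding mac80211 queue.
	 * It is started again from ath_wake_mac80211_queue() once tx
	 * completions drain pending_frames back below the threshold.
	 */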
Felix Fietkau066dae92010-11-07 14:59:39 +01001797 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001798 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001799 if (txq == sc->tx.txq_map[q] &&
1800 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1801 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001802 txq->stopped = 1;
1803 }
1804 spin_unlock_bh(&txq->axq_lock);
1805
Sujithe8324352009-01-16 21:38:42 +05301806 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001807
1808 return 0;
1809}
1810
Sujithe8324352009-01-16 21:38:42 +05301811/*****************/
1812/* TX Completion */
1813/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001814
Sujithe8324352009-01-16 21:38:42 +05301815static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001816 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001817 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001818{
Sujithe8324352009-01-16 21:38:42 +05301819 struct ieee80211_hw *hw = sc->hw;
1820 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001821 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001822 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001823 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301824
Joe Perches226afe62010-12-02 19:12:37 -08001825 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301826
Felix Fietkau827e69b2009-11-15 23:09:25 +01001827 if (aphy)
1828 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301829
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301830 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301831 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301832
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301833 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301834 /* Frame was ACKed */
1835 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1836 }
1837
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001838 padpos = ath9k_cmn_padpos(hdr->frame_control);
1839 padsize = padpos & 3;
1840 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301841 /*
1842 * Remove MAC header padding before giving the frame back to
1843 * mac80211.
1844 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001845 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301846 skb_pull(skb, padsize);
1847 }
1848
Sujith1b04b932010-01-08 10:36:05 +05301849 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1850 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001851 ath_dbg(common, ATH_DBG_PS,
1852 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301853 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1854 PS_WAIT_FOR_CAB |
1855 PS_WAIT_FOR_PSPOLL_DATA |
1856 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001857 }
1858
Felix Fietkau61117f02010-11-11 03:18:36 +01001859 if (unlikely(ftype))
1860 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001861 else {
1862 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001863 if (txq == sc->tx.txq_map[q]) {
1864 spin_lock_bh(&txq->axq_lock);
1865 if (WARN_ON(--txq->pending_frames < 0))
1866 txq->pending_frames = 0;
1867 spin_unlock_bh(&txq->axq_lock);
1868 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001869
Felix Fietkau827e69b2009-11-15 23:09:25 +01001870 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001871 }
Sujithe8324352009-01-16 21:38:42 +05301872}
1873
1874static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001875 struct ath_txq *txq, struct list_head *bf_q,
1876 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301877{
1878 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301879 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301880 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301881
Sujithe8324352009-01-16 21:38:42 +05301882 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301883 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301884
1885 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301886 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301887
1888 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301889 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301890 }
1891
Ben Greearc1739eb32010-10-14 12:45:29 -07001892 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001893 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001894
1895 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001896 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001897 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001898 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001899 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001900 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001901 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001902 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1903 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001904 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001905 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
1906 * accidentally reference it later.
1907 */
1908 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301909
1910 /*
1911 * Return the list of ath_buf's of this mpdu to the free queue
1912 */
1913 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1914 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1915 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1916}
1917
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001918static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001919 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301920{
Sujitha22be222009-03-30 15:28:36 +05301921 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301922 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301923 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001924 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001925 struct ath_softc *sc = bf->aphy->sc;
1926 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301927 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301928
Sujith95e4acb2009-03-13 08:56:09 +05301929 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001930 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301931
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001932 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301933 WARN_ON(tx_rateindex >= hw->max_rates);
1934
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001935 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301936 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001937 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001938 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301939
Felix Fietkaub572d032010-11-14 15:20:07 +01001940 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001941
Felix Fietkaub572d032010-11-14 15:20:07 +01001942 tx_info->status.ampdu_len = nframes;
1943 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001944 }
1945
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001946 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301947 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001948 /*
1949 * If an underrun error is seen, treat it as an excessive retry,
1950 * but only if the max frame trigger level has been reached
1951 * (2 KB for single stream, 4 KB for dual stream).
1952 * Adjust the long retry count as if the frame had been tried
1953 * hw->max_rate_tries times, so that rate control updates the
1954 * PER for the failed rate accordingly.
1955 * When the bus is congested, penalizing this type of underrun
1956 * should help the hardware actually transmit new frames
1957 * successfully by eventually preferring slower rates.
1958 * This should also alleviate congestion on the bus itself.
1959 */
1960 if (ieee80211_is_data(hdr->frame_control) &&
1961 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1962 ATH9K_TX_DELIM_UNDERRUN)) &&
1963 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1964 tx_info->status.rates[tx_rateindex].count =
1965 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301966 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301967
Felix Fietkau545750d2009-11-23 22:21:01 +01001968 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301969 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001970 tx_info->status.rates[i].idx = -1;
1971 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301972
Felix Fietkau78c46532010-06-25 01:26:16 +02001973 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301974}
1975
Felix Fietkau066dae92010-11-07 14:59:39 +01001976static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
Sujith059d8062009-01-16 21:38:49 +05301977{
Felix Fietkau066dae92010-11-07 14:59:39 +01001978 struct ath_txq *txq;
Sujith059d8062009-01-16 21:38:49 +05301979
Felix Fietkau066dae92010-11-07 14:59:39 +01001980 txq = sc->tx.txq_map[qnum];
Sujith059d8062009-01-16 21:38:49 +05301981 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001982 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001983 if (ath_mac80211_start_queue(sc, qnum))
1984 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301985 }
1986 spin_unlock_bh(&txq->axq_lock);
1987}
1988
Sujithc4288392008-11-18 09:09:30 +05301989static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001990{
Sujithcbe61d82009-02-09 13:27:12 +05301991 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001992 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1994 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301995 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001996 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05301997 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001998 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01001999 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000
Joe Perches226afe62010-12-02 19:12:37 -08002001 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2002 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2003 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002005 for (;;) {
2006 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007 if (list_empty(&txq->axq_q)) {
2008 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002009 if (sc->sc_flags & SC_OP_TXAGGR)
2010 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002011 spin_unlock_bh(&txq->axq_lock);
2012 break;
2013 }
2014 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2015
2016 /*
2017 * There is a race condition where a BH gets scheduled
2018 * after sw writes TxE and before hw re-loads the last
2019 * descriptor to get the newly chained one.
2020 * Software must keep the last DONE descriptor as a
2021 * holding descriptor - software does so by marking
2022 * it with the STALE flag.
2023 */
2024 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302025 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002026 bf_held = bf;
2027 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302028 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002029 break;
2030 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002031 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302032 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002033 }
2034 }
2035
2036 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302037 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002038
Felix Fietkau29bffa92010-03-29 20:14:23 -07002039 memset(&ts, 0, sizeof(ts));
2040 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002041 if (status == -EINPROGRESS) {
2042 spin_unlock_bh(&txq->axq_lock);
2043 break;
2044 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002045 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002046
2047 /*
2048 * Remove ath_buf's of the same transmit unit from txq,
2049 * but leave the last descriptor in place as the holding
2050 * descriptor for hw.
2051 */
Sujitha119cc42009-03-30 15:28:38 +05302052 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002054 if (!list_is_singular(&lastbf->list))
2055 list_cut_position(&bf_head,
2056 &txq->axq_q, lastbf->list.prev);
2057
2058 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002059 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002060 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002061 if (bf_held)
2062 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002063
2064 if (bf_is_ampdu_not_probing(bf))
2065 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002066 spin_unlock_bh(&txq->axq_lock);
2067
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002068 if (bf_held)
2069 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002070
Sujithcd3d39a2008-08-11 14:03:34 +05302071 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072 /*
2073 * This frame is sent out as a single frame.
2074 * Use hardware retry status for this frame.
2075 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002076 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302077 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002078 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079 }
Johannes Berge6a98542008-10-21 12:40:02 +02002080
Felix Fietkau066dae92010-11-07 14:59:39 +01002081 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2082
Sujithcd3d39a2008-08-11 14:03:34 +05302083 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002084 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2085 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002087 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088
Felix Fietkau066dae92010-11-07 14:59:39 +01002089 if (txq == sc->tx.txq_map[qnum])
2090 ath_wake_mac80211_queue(sc, qnum);
Sujith059d8062009-01-16 21:38:49 +05302091
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302093 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094 ath_txq_schedule(sc, txq);
2095 spin_unlock_bh(&txq->axq_lock);
2096 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002097}
2098
Sujith305fe472009-07-23 15:32:29 +05302099static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002100{
2101 struct ath_softc *sc = container_of(work, struct ath_softc,
2102 tx_complete_work.work);
2103 struct ath_txq *txq;
2104 int i;
2105 bool needreset = false;
2106
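	/*
	 * Simple two-pass hang detector: each poll marks busy queues with
	 * axq_tx_inprogress, and the flag is cleared whenever a tx
	 * completion is processed.  If a queue still has frames and the
	 * flag is still set on the next poll, nothing completed during
	 * the whole interval and the chip is reset.
	 */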
2107 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2108 if (ATH_TXQ_SETUP(sc, i)) {
2109 txq = &sc->tx.txq[i];
2110 spin_lock_bh(&txq->axq_lock);
2111 if (txq->axq_depth) {
2112 if (txq->axq_tx_inprogress) {
2113 needreset = true;
2114 spin_unlock_bh(&txq->axq_lock);
2115 break;
2116 } else {
2117 txq->axq_tx_inprogress = true;
2118 }
2119 }
2120 spin_unlock_bh(&txq->axq_lock);
2121 }
2122
2123 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002124 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2125 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302126 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002127 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302128 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002129 }
2130
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002131 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002132 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2133}
2134
2135
Sujithe8324352009-01-16 21:38:42 +05302136
2137void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138{
Sujithe8324352009-01-16 21:38:42 +05302139 int i;
2140 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141
Sujithe8324352009-01-16 21:38:42 +05302142 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143
2144 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302145 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2146 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 }
2148}
2149
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002150void ath_tx_edma_tasklet(struct ath_softc *sc)
2151{
2152 struct ath_tx_status txs;
2153 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2154 struct ath_hw *ah = sc->sc_ah;
2155 struct ath_txq *txq;
2156 struct ath_buf *bf, *lastbf;
2157 struct list_head bf_head;
2158 int status;
2159 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002160 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002161
2162 for (;;) {
2163 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2164 if (status == -EINPROGRESS)
2165 break;
2166 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002167 ath_dbg(common, ATH_DBG_XMIT,
2168 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002169 break;
2170 }
2171
2172 /* Skip beacon completions */
2173 if (txs.qid == sc->beacon.beaconq)
2174 continue;
2175
2176 txq = &sc->tx.txq[txs.qid];
2177
2178 spin_lock_bh(&txq->axq_lock);
2179 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2180 spin_unlock_bh(&txq->axq_lock);
2181 return;
2182 }
2183
2184 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2185 struct ath_buf, list);
2186 lastbf = bf->bf_lastbf;
2187
2188 INIT_LIST_HEAD(&bf_head);
2189 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2190 &lastbf->list);
2191 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2192 txq->axq_depth--;
2193 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002194 if (bf_is_ampdu_not_probing(bf))
2195 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002196 spin_unlock_bh(&txq->axq_lock);
2197
2198 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2199
2200 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002201 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2202 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002203 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002204 }
2205
Felix Fietkau066dae92010-11-07 14:59:39 +01002206 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2207
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002208 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002209 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2210 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002211 else
2212 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2213 &txs, txok, 0);
2214
Felix Fietkau066dae92010-11-07 14:59:39 +01002215 if (txq == sc->tx.txq_map[qnum])
2216 ath_wake_mac80211_queue(sc, qnum);
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002217
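		/*
		 * A FIFO slot has just been freed; if any frames were held
		 * back on txq_fifo_pending, push the first of them to the
		 * hardware now, otherwise let the aggregation scheduler run.
		 */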
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002218 spin_lock_bh(&txq->axq_lock);
2219 if (!list_empty(&txq->txq_fifo_pending)) {
2220 INIT_LIST_HEAD(&bf_head);
2221 bf = list_first_entry(&txq->txq_fifo_pending,
2222 struct ath_buf, list);
2223 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2224 &bf->bf_lastbf->list);
2225 ath_tx_txqaddbuf(sc, txq, &bf_head);
2226 } else if (sc->sc_flags & SC_OP_TXAGGR)
2227 ath_txq_schedule(sc, txq);
2228 spin_unlock_bh(&txq->axq_lock);
2229 }
2230}
2231
Sujithe8324352009-01-16 21:38:42 +05302232/*****************/
2233/* Init, Cleanup */
2234/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002235
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002236static int ath_txstatus_setup(struct ath_softc *sc, int size)
2237{
2238 struct ath_descdma *dd = &sc->txsdma;
2239 u8 txs_len = sc->sc_ah->caps.txs_len;
2240
2241 dd->dd_desc_len = size * txs_len;
2242 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2243 &dd->dd_desc_paddr, GFP_KERNEL);
2244 if (!dd->dd_desc)
2245 return -ENOMEM;
2246
2247 return 0;
2248}
2249
2250static int ath_tx_edma_init(struct ath_softc *sc)
2251{
2252 int err;
2253
2254 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2255 if (!err)
2256 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2257 sc->txsdma.dd_desc_paddr,
2258 ATH_TXSTATUS_RING_SIZE);
2259
2260 return err;
2261}
2262
2263static void ath_tx_edma_cleanup(struct ath_softc *sc)
2264{
2265 struct ath_descdma *dd = &sc->txsdma;
2266
2267 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2268 dd->dd_desc_paddr);
2269}
2270
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271int ath_tx_init(struct ath_softc *sc, int nbufs)
2272{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002273 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002274 int error = 0;
2275
Sujith797fe5cb2009-03-30 15:28:45 +05302276 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002277
Sujith797fe5cb2009-03-30 15:28:45 +05302278 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002279 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302280 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002281 ath_err(common,
2282 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302283 goto err;
2284 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002285
Sujith797fe5cb2009-03-30 15:28:45 +05302286 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002287 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302288 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002289 ath_err(common,
2290 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302291 goto err;
2292 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002293
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002294 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2295
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002296 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2297 error = ath_tx_edma_init(sc);
2298 if (error)
2299 goto err;
2300 }
2301
Sujith797fe5cb2009-03-30 15:28:45 +05302302err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303 if (error != 0)
2304 ath_tx_cleanup(sc);
2305
2306 return error;
2307}
2308
Sujith797fe5cb2009-03-30 15:28:45 +05302309void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002310{
Sujithb77f4832008-12-07 21:44:03 +05302311 if (sc->beacon.bdma.dd_desc_len != 0)
2312 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313
Sujithb77f4832008-12-07 21:44:03 +05302314 if (sc->tx.txdma.dd_desc_len != 0)
2315 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002316
2317 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2318 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002319}
2320
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002321void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2322{
Sujithc5170162008-10-29 10:13:59 +05302323 struct ath_atx_tid *tid;
2324 struct ath_atx_ac *ac;
2325 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326
Sujith8ee5afb2008-12-07 21:43:36 +05302327 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302328 tidno < WME_NUM_TID;
2329 tidno++, tid++) {
2330 tid->an = an;
2331 tid->tidno = tidno;
2332 tid->seq_start = tid->seq_next = 0;
2333 tid->baw_size = WME_MAX_BA;
2334 tid->baw_head = tid->baw_tail = 0;
2335 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302336 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302337 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302338 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302339 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302340 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302341 tid->state &= ~AGGR_ADDBA_COMPLETE;
2342 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302343 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344
Sujith8ee5afb2008-12-07 21:43:36 +05302345 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302346 acno < WME_NUM_AC; acno++, ac++) {
2347 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002348 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302349 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002350 }
2351}
2352
Sujithb5aa9bf2008-10-29 10:13:31 +05302353void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002354{
Felix Fietkau2b409942010-07-07 19:42:08 +02002355 struct ath_atx_ac *ac;
2356 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002358 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302359
Felix Fietkau2b409942010-07-07 19:42:08 +02002360 for (tidno = 0, tid = &an->tid[tidno];
2361 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362
Felix Fietkau2b409942010-07-07 19:42:08 +02002363 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002364 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365
Felix Fietkau2b409942010-07-07 19:42:08 +02002366 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367
Felix Fietkau2b409942010-07-07 19:42:08 +02002368 if (tid->sched) {
2369 list_del(&tid->list);
2370 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002372
2373 if (ac->sched) {
2374 list_del(&ac->list);
2375 tid->ac->sched = false;
2376 }
2377
2378 ath_tid_drain(sc, txq, tid);
2379 tid->state &= ~AGGR_ADDBA_COMPLETE;
2380 tid->state &= ~AGGR_CLEANUP;
2381
2382 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383 }
2384}