/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

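/*
 * Note on the timing macros above: with the regular (long) guard interval
 * an HT OFDM symbol lasts 4 us, with the short GI it lasts 3.6 us. For
 * example, SYMBOL_TIME(10) == 40 us while SYMBOL_TIME_HALFGI(10) ==
 * (10 * 18 + 4) / 5 == 36 us, and NUM_SYMBOLS_PER_USEC(16) == 4.
 */
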
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,  54 },         /*  0: BPSK */
        {    52, 108 },         /*  1: QPSK 1/2 */
        {    78, 162 },         /*  2: QPSK 3/4 */
        {   104, 216 },         /*  3: 16-QAM 1/2 */
        {   156, 324 },         /*  4: 16-QAM 3/4 */
        {   208, 432 },         /*  5: 64-QAM 2/3 */
        {   234, 486 },         /*  6: 64-QAM 3/4 */
        {   260, 540 },         /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
                6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
                9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
                12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
                7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
                10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
                14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
                13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
                20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
                26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
                14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
                22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
                29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
        }
};
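
/*
 * The table above gives, per MCS index (rows of eight entries for one, two,
 * three and four spatial streams) and per bandwidth/guard-interval mode, the
 * largest frame length in bytes that still fits into a 4 ms transmit burst,
 * capped at 65532. The values appear to be derived from the nominal MCS data
 * rates (roughly rate * 4 ms / 8); ath_lookup_rate() below uses them to bound
 * the aggregate size.
 */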

/*********************/
/* Aggregation logic */
/*********************/

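/*
 * Queue a TID for transmission: the TID is appended to its access category's
 * list and, if needed, the AC is appended to the hardware queue's schedule
 * list. Paused or already-scheduled TIDs are left alone.
 */
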
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (list_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));
        spin_lock_bh(&txq->axq_lock);

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                spin_unlock_bh(&txq->axq_lock);
                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries) {
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                } else {
                        ath_tx_send_normal(sc, txq, NULL, &bf_head);
                }
                spin_lock_bh(&txq->axq_lock);
        }

        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
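
/*
 * Illustrative example of the two helpers above (hypothetical numbers): with
 * seq_start == 100, adding seqno 103 sets the bit at offset 3 from baw_head
 * and, if necessary, advances baw_tail past it. When frames 100..102 later
 * complete, ath_tx_update_baw() clears their bits and slides seq_start and
 * baw_head forward to the first still-outstanding frame.
 */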

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)

{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries)
                        ath_tx_update_baw(sc, tid, fi->seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        if (fi->retries++ > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}
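
/*
 * Note: ath_clone_txbuf() is used on the software-retry path (see
 * ath_tx_complete_aggr() below) when the last subframe's descriptor may still
 * be held by the hardware as a "stale" holding descriptor; a fresh descriptor
 * is cloned so the retry does not touch the one the hardware still references.
 */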

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        bf->bf_state.bf_type |= BUF_XRETRY;
                        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_rc_status(bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) && retry) {
                                if (fi->retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * is not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid, fi->seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
                                                ath_tx_rc_status(bf, ts, nframes,
                                                                 nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 0);
                                                break;
                                        }

                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             tbf->bf_desc);
                                        list_add_tail(&tbf->list, &bf_head);
                                } else {
                                        /*
                                         * Clear descriptor status words for
                                         * software retry
                                         */
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             bf->bf_desc);
                                }
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                ath_tx_flush_tid(sc, tid);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;
                }
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet at all.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * The h/w can accept aggregates up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}
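
/*
 * Worked example for ath_lookup_rate(), purely illustrative: if the first
 * rate in the series is single-stream HT20 MCS 7 with the long GI, the
 * table gives a 4 ms limit of 32172 bytes; the aggregate limit is then the
 * minimum of that, ATH_AMPDU_LIMIT_MAX and the peer's maxampdu, with the
 * 4 ms figure additionally scaled by 3/8 while Bluetooth coexistence
 * priority traffic is detected.
 */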

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if (fi->keyix != ATH9K_TXKEYIX_INVALID)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
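
/*
 * Worked example for ath_compute_num_delims(), purely illustrative: with an
 * MPDU density of 8 us, HT20, long GI and MCS 7 (one stream), nsymbols =
 * NUM_SYMBOLS_PER_USEC(8) = 2 and nsymbits = 260, so minlen = (2 * 260) / 8
 * = 65 bytes; subframes shorter than that are padded out to at least
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ delimiters.
 */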

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                fi = get_frame_info(bf->bf_mpdu);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
                    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, fi->seqno);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
                                               bf->bf_daddr);
                }
                bf_prev = bf;

        } while (!list_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ath_frame_info *fi;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        fi = get_frame_info(bf->bf_mpdu);

                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf, fi->framelen);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf, aggr_len);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;

        return 0;
}
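
/*
 * Note on the ADDBA life cycle as implemented here: ath_tx_aggr_start()
 * marks the TID as ADDBA-in-progress, pauses it and reports the starting
 * sequence number; ath_tx_aggr_resume() un-pauses it once the ADDBA exchange
 * has completed; and ath_tx_aggr_stop() below tears the session down,
 * deferring cleanup while subframes are still in flight.
 */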

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise we wait for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up, in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
                                TXQ_FLAG_TXERRINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_err(common, "qnum %u out of range, max %zu!\n",
                        qnum, ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}
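
/*
 * Example of the readytime computation above (illustrative numbers): with a
 * cabqReadytime clamped to 80, the CAB queue ready time is set to 80% of the
 * beacon interval (beacon_interval * 80 / 100).
 */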

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
        return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                                txq->txq_headidx = txq->txq_tailidx = 0;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                                      struct ath_buf, list);
                        }
                } else {
                        if (list_empty(&txq->axq_q)) {
                                txq->axq_link = NULL;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        }
                        bf = list_first_entry(&txq->axq_q, struct ath_buf,
                                              list);

                        if (bf->bf_stale) {
                                list_del(&bf->list);
                                spin_unlock_bh(&txq->axq_lock);

                                ath_tx_return_buffer(sc, bf);
                                continue;
                        }
                }

                lastbf = bf->bf_lastbf;

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo[txq->txq_tailidx],
                                          &lastbf->list);
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
                } else {
                        /* remove ath_buf's of the same mpdu from txq */
                        list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                }

                txq->axq_depth--;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                spin_lock_bh(&txq->axq_lock);
                while (!list_empty(&txq->txq_fifo_pending)) {
                        bf = list_first_entry(&txq->txq_fifo_pending,
                                              struct ath_buf, list);
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo_pending,
                                          &bf->bf_lastbf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        if (bf_isampdu(bf))
                                ath_tx_complete_aggr(sc, txq, bf, &bf_head,
                                                     &ts, 0, retry_tx);
                        else
                                ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                                    &ts, 0, 0);
                        spin_lock_bh(&txq->axq_lock);
                }
                spin_unlock_bh(&txq->axq_lock);
        }

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return true;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend)
                ath_err(common, "Failed to stop TX DMA!\n");

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }

        return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;

        if (list_empty(&txq->axq_acq))
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}
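
/*
 * Scheduling note: ath_txq_schedule() services one access category per call
 * in round-robin fashion; it pulls the first TID of that AC, lets
 * ath_tx_sched_aggr() build aggregates for it, and re-queues the TID (and
 * the AC) if frames are still pending, so other TIDs and ACs get their turn
 * on subsequent calls.
 */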

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        ath_dbg(common, ATH_DBG_QUEUE,
                "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
                        list_splice_tail_init(head, &txq->txq_fifo_pending);
                        return;
                }
                if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
                        ath_dbg(common, ATH_DBG_XMIT,
                                "Initializing tx fifo %d which is non-empty\n",
                                txq->txq_headidx);
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link == NULL) {
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                                txq->axq_qnum, ito64(bf->bf_daddr),
                                bf->bf_desc);
                } else {
                        *txq->axq_link = bf->bf_daddr;
                        ath_dbg(common, ATH_DBG_XMIT,
                                "link[%u] (%p)=%llx (%p)\n",
                                txq->axq_qnum, txq->axq_link,
                                ito64(bf->bf_daddr), bf->bf_desc);
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth++;
}
1330
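/*
 * Queue an A-MPDU subframe: either defer it to the per-TID software
 * queue (TID paused, sequence number outside the BAW, or hardware
 * queue already deep enough) or add it to the BAW and send it to the
 * hardware directly without aggregation.
 */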
Sujithe8324352009-01-16 21:38:42 +05301331static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001332 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301333{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001334 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001335 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301336
Sujithe8324352009-01-16 21:38:42 +05301337 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301338 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301339
1340 /*
1341 * Do not queue to h/w when any of the following conditions is true:
1342 * - there are pending frames in software queue
1343 * - the TID is currently paused for ADDBA/BAR request
1344 * - seqno is not within block-ack window
1345 * - h/w queue depth exceeds low water mark
1346 */
1347 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001348 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001349 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001350 /*
Sujithe8324352009-01-16 21:38:42 +05301351		 * Add this frame to the software queue so that it can be
1352		 * scheduled for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001353 */
Felix Fietkau04caf862010-11-14 15:20:12 +01001354 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301355 ath_tx_queue_tid(txctl->txq, tid);
1356 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001357 }
1358
Felix Fietkau04caf862010-11-14 15:20:12 +01001359 INIT_LIST_HEAD(&bf_head);
1360 list_add(&bf->list, &bf_head);
1361
Sujithe8324352009-01-16 21:38:42 +05301362 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001363 if (!fi->retries)
1364 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301365
1366 /* Queue to h/w without aggregation */
Sujithd43f30152009-01-16 21:38:53 +05301367 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001368 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001369 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301370}
1371
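/*
 * Send a buffer out as a regular (non-aggregate) frame: advance the
 * TID's starting sequence number when applicable, set up the rate
 * series and push the single-buffer list onto the hardware queue.
 */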
Felix Fietkau82b873a2010-11-11 03:18:37 +01001372static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1373 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001374 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001375{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001376 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301377 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001378
Sujithe8324352009-01-16 21:38:42 +05301379 bf = list_first_entry(bf_head, struct ath_buf, list);
1380 bf->bf_state.bf_type &= ~BUF_AMPDU;
1381
1382 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001383 if (tid)
1384 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301385
Sujithd43f30152009-01-16 21:38:53 +05301386 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001387 fi = get_frame_info(bf->bf_mpdu);
1388 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301389 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301390 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001391}
1392
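/*
 * Map the 802.11 frame type to the hardware packet type carried in
 * the TX descriptor.
 */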
Sujith528f0c62008-10-29 10:14:26 +05301393static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394{
Sujith528f0c62008-10-29 10:14:26 +05301395 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001396 enum ath9k_pkt_type htype;
1397 __le16 fc;
1398
Sujith528f0c62008-10-29 10:14:26 +05301399 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001400 fc = hdr->frame_control;
1401
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001402 if (ieee80211_is_beacon(fc))
1403 htype = ATH9K_PKT_TYPE_BEACON;
1404 else if (ieee80211_is_probe_resp(fc))
1405 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1406 else if (ieee80211_is_atim(fc))
1407 htype = ATH9K_PKT_TYPE_ATIM;
1408 else if (ieee80211_is_pspoll(fc))
1409 htype = ATH9K_PKT_TYPE_PSPOLL;
1410 else
1411 htype = ATH9K_PKT_TYPE_NORMAL;
1412
1413 return htype;
1414}
1415
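/*
 * Fill in the per-frame ath_frame_info: key index/type, frame length
 * and sequence number. For QoS data frames on an aggregation-enabled
 * HT connection the sequence number assigned by the upper layer is
 * overridden with the one tracked in the TID's aggregation state.
 */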
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001416static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1417 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301418{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001419 struct ath_wiphy *aphy = hw->priv;
1420 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001422 struct ieee80211_sta *sta = tx_info->control.sta;
1423 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301424 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001425 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301426 struct ath_node *an;
1427 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001428 enum ath9k_key_type keytype;
1429 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001430 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301431
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001432 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301433
Sujith528f0c62008-10-29 10:14:26 +05301434 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001435 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1436 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001437
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 an = (struct ath_node *) sta->drv_priv;
1439 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1440
1441 /*
1442 * Override seqno set by upper layer with the one
1443 * in tx aggregation state.
1444 */
1445 tid = ATH_AN_2_TID(an, tidno);
1446 seqno = tid->seq_next;
1447 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1448 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1449 }
1450
1451 memset(fi, 0, sizeof(*fi));
1452 if (hw_key)
1453 fi->keyix = hw_key->hw_key_idx;
1454 else
1455 fi->keyix = ATH9K_TXKEYIX_INVALID;
1456 fi->keytype = keytype;
1457 fi->framelen = framelen;
1458 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301459}
1460
Felix Fietkau82b873a2010-11-11 03:18:37 +01001461static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301462{
1463 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1464 int flags = 0;
1465
1466 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1467 flags |= ATH9K_TXDESC_INTREQ;
1468
1469 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1470 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301471
Felix Fietkau82b873a2010-11-11 03:18:37 +01001472 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001473 flags |= ATH9K_TXDESC_LDPC;
1474
Sujith528f0c62008-10-29 10:14:26 +05301475 return flags;
1476}
1477
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001478/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001479 * rix - rate index
1480 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1481 * width - 0 for 20 MHz, 1 for 40 MHz
1482 * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
1483 */
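/*
 * Illustrative calculation (derived from the tables above): with
 * pktlen = 1500, MCS 7, 40 MHz and long GI, nsymbits = 540 bits per
 * symbol, so nsymbols = (1500 * 8 + 22 + 539) / 540 = 23 and the data
 * portion takes 23 * 4 us = 92 us; adding L_STF + L_LTF + L_SIG +
 * HT_SIG + HT_STF + HT_LTF(1) = 36 us gives a total of 128 us.
 */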
Felix Fietkau269c44b2010-11-14 15:20:06 +01001484static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301485 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001487 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001488 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301489
1490 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001491 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001492 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001493 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001494 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1495
1496 if (!half_gi)
1497 duration = SYMBOL_TIME(nsymbols);
1498 else
1499 duration = SYMBOL_TIME_HALFGI(nsymbols);
1500
Sujithe63835b2008-11-18 09:07:53 +05301501	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301503
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 return duration;
1505}
1506
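/*
 * Reduce a three-chain TX chainmask to two chains for rates below
 * 0x90 on 5 GHz channels when SC_OP_ENABLE_APM is set; otherwise the
 * configured chainmask is returned unchanged.
 */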
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301507u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1508{
1509 struct ath_hw *ah = sc->sc_ah;
1510 struct ath9k_channel *curchan = ah->curchan;
1511 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1512 (curchan->channelFlags & CHANNEL_5GHZ) &&
1513 (chainmask == 0x7) && (rate < 0x90))
1514 return 0x3;
1515 else
1516 return chainmask;
1517}
1518
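/*
 * Build the four-entry rate series for a buffer from the mac80211
 * rate control information: set RTS/CTS protection flags, compute the
 * packet duration for each MCS or legacy rate, and program the series
 * into the hardware descriptor.
 */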
Felix Fietkau269c44b2010-11-14 15:20:06 +01001519static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001521 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301523 struct sk_buff *skb;
1524 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301525 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001526 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301527 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301528 int i, flags = 0;
1529 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301530 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301531
1532 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301533
Sujitha22be222009-03-30 15:28:36 +05301534 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301535 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301536 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301537 hdr = (struct ieee80211_hdr *)skb->data;
1538 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301539
Sujithc89424d2009-01-30 14:29:28 +05301540 /*
1541 * We check if Short Preamble is needed for the CTS rate by
1542 * checking the BSS's global flag.
1543 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1544 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001545 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1546 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301547 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001548 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001549
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001551 bool is_40, is_sgi, is_sp;
1552 int phy;
1553
Sujithe63835b2008-11-18 09:07:53 +05301554 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001555 continue;
1556
Sujitha8efee42008-11-18 09:07:30 +05301557 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301558 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001559
Felix Fietkau27032052010-01-17 21:08:50 +01001560 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1561 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301562 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001563 flags |= ATH9K_TXDESC_RTSENA;
1564 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1565 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1566 flags |= ATH9K_TXDESC_CTSENA;
1567 }
1568
Sujithc89424d2009-01-30 14:29:28 +05301569 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1570 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1571 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1572 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001573
Felix Fietkau545750d2009-11-23 22:21:01 +01001574 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1575 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1576 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1577
1578 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1579 /* MCS rates */
1580 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301581 series[i].ChSel = ath_txchainmask_reduction(sc,
1582 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001583 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001584 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001585 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1586 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001587 continue;
1588 }
1589
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301590 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001591 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1592 !(rate->flags & IEEE80211_RATE_ERP_G))
1593 phy = WLAN_RC_PHY_CCK;
1594 else
1595 phy = WLAN_RC_PHY_OFDM;
1596
1597 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1598 series[i].Rate = rate->hw_value;
1599 if (rate->hw_value_short) {
1600 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1601 series[i].Rate |= rate->hw_value_short;
1602 } else {
1603 is_sp = false;
1604 }
1605
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301606 if (bf->bf_state.bfs_paprd)
1607 series[i].ChSel = common->tx_chainmask;
1608 else
1609 series[i].ChSel = ath_txchainmask_reduction(sc,
1610 common->tx_chainmask, series[i].Rate);
1611
Felix Fietkau545750d2009-11-23 22:21:01 +01001612 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001613 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001614 }
1615
Felix Fietkau27032052010-01-17 21:08:50 +01001616 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001617 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001618 flags &= ~ATH9K_TXDESC_RTSENA;
1619
1620 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1621 if (flags & ATH9K_TXDESC_RTSENA)
1622 flags &= ~ATH9K_TXDESC_CTSENA;
1623
Sujithe63835b2008-11-18 09:07:53 +05301624 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301625 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1626 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301627 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301628 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301629
Sujith17d79042009-02-09 13:27:03 +05301630 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301631 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001632}
1633
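/*
 * Allocate an ath_buf for the skb, map it for DMA and fill in the
 * first TX descriptor. Returns NULL if no buffer is available or the
 * DMA mapping fails.
 */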
Felix Fietkau82b873a2010-11-11 03:18:37 +01001634static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001635 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001636 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301637{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001638 struct ath_wiphy *aphy = hw->priv;
1639 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001640 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001641 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001642 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001643 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001644 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001645 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001646
1647 bf = ath_tx_get_buffer(sc);
1648 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001649 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001650 return NULL;
1651 }
Sujithe8324352009-01-16 21:38:42 +05301652
Sujithe8324352009-01-16 21:38:42 +05301653 ATH_TXBUF_RESET(bf);
1654
Felix Fietkau827e69b2009-11-15 23:09:25 +01001655 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001656 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301657 bf->bf_mpdu = skb;
1658
Ben Greearc1739eb32010-10-14 12:45:29 -07001659 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1660 skb->len, DMA_TO_DEVICE);
1661 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301662 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001663 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001664 ath_err(ath9k_hw_common(sc->sc_ah),
1665 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001666 ath_tx_return_buffer(sc, bf);
1667 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301668 }
1669
Sujithe8324352009-01-16 21:38:42 +05301670 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301671
1672 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001673 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301674
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001675 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1676 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301677
1678 ath9k_hw_filltxdesc(ah, ds,
1679 skb->len, /* segment length */
1680 true, /* first segment */
1681 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001682 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001683 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001684 txq->axq_qnum);
1685
1686
1687 return bf;
1688}
1689
1690/* FIXME: tx power */
1691static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1692 struct ath_tx_control *txctl)
1693{
1694 struct sk_buff *skb = bf->bf_mpdu;
1695 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1696 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001697 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001698 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001699 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301700
Sujithe8324352009-01-16 21:38:42 +05301701 spin_lock_bh(&txctl->txq->axq_lock);
1702
Felix Fietkau248a38d2010-12-10 21:16:46 +01001703 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001704 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1705 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001706 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001707
Felix Fietkau066dae92010-11-07 14:59:39 +01001708 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001709 }
1710
1711 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001712 /*
1713 * Try aggregation if it's a unicast data frame
1714 * and the destination is HT capable.
1715 */
1716 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301717 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001718 INIT_LIST_HEAD(&bf_head);
1719 list_add_tail(&bf->list, &bf_head);
1720
Felix Fietkau61117f02010-11-11 03:18:36 +01001721 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001722 bf->bf_state.bfs_paprd = txctl->paprd;
1723
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001724 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1726 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001727
Felix Fietkau248a38d2010-12-10 21:16:46 +01001728 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301729 }
1730
1731 spin_unlock_bh(&txctl->txq->axq_lock);
1732}
1733
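/*
 * Typical transmit path: ath_tx_start() assigns the sequence number,
 * pads the header and fills the frame info, ath_tx_setup_buffer()
 * maps the skb and builds the descriptor, and ath_tx_start_dma()
 * queues the buffer via ath_tx_send_ampdu() or ath_tx_send_normal().
 */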
1734/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001735int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301736 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001737{
Felix Fietkau28d16702010-11-14 15:20:10 +01001738 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1739 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001740 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001741 struct ath_wiphy *aphy = hw->priv;
1742 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001743 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001744 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001745 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001746 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001747 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001748
Ben Greeara9927ba2010-12-06 21:13:49 -08001749 /* NOTE: sta can be NULL according to net/mac80211.h */
1750 if (sta)
1751 txctl->an = (struct ath_node *)sta->drv_priv;
1752
Felix Fietkau04caf862010-11-14 15:20:12 +01001753 if (info->control.hw_key)
1754 frmlen += info->control.hw_key->icv_len;
1755
Felix Fietkau28d16702010-11-14 15:20:10 +01001756 /*
1757 * As a temporary workaround, assign seq# here; this will likely need
1758 * to be cleaned up to work better with Beacon transmission and virtual
1759 * BSSes.
1760 */
1761 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1762 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1763 sc->tx.seq_no += 0x10;
1764 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1765 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1766 }
1767
1768 /* Add the padding after the header if this is not already done */
1769 padpos = ath9k_cmn_padpos(hdr->frame_control);
1770 padsize = padpos & 3;
1771 if (padsize && skb->len > padpos) {
1772 if (skb_headroom(skb) < padsize)
1773 return -ENOMEM;
1774
1775 skb_push(skb, padsize);
1776 memmove(skb->data, skb->data + padsize, padpos);
1777 }
1778
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001779 setup_frame_info(hw, skb, frmlen);
1780
1781 /*
1782 * At this point, the vif, hw_key and sta pointers in the tx control
1783	 * info are no longer valid (overwritten by the ath_frame_info data).
1784 */
1785
1786 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001787 if (unlikely(!bf))
1788 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001789
Felix Fietkau066dae92010-11-07 14:59:39 +01001790 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001791 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001792 if (txq == sc->tx.txq_map[q] &&
1793 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1794 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001795 txq->stopped = 1;
1796 }
1797 spin_unlock_bh(&txq->axq_lock);
1798
Sujithe8324352009-01-16 21:38:42 +05301799 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001800
1801 return 0;
1802}
1803
Sujithe8324352009-01-16 21:38:42 +05301804/*****************/
1805/* TX Completion */
1806/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001807
Sujithe8324352009-01-16 21:38:42 +05301808static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001809 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001810 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001811{
Sujithe8324352009-01-16 21:38:42 +05301812 struct ieee80211_hw *hw = sc->hw;
1813 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001814 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001815	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001816 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301817
Joe Perches226afe62010-12-02 19:12:37 -08001818 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301819
Felix Fietkau827e69b2009-11-15 23:09:25 +01001820 if (aphy)
1821 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301822
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301823 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301824 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301825
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301826 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301827 /* Frame was ACKed */
1828 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1829 }
1830
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001831 padpos = ath9k_cmn_padpos(hdr->frame_control);
1832 padsize = padpos & 3;
1833	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301834 /*
1835 * Remove MAC header padding before giving the frame back to
1836 * mac80211.
1837 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001838 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301839 skb_pull(skb, padsize);
1840 }
1841
Sujith1b04b932010-01-08 10:36:05 +05301842 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1843 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001844 ath_dbg(common, ATH_DBG_PS,
1845 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301846 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1847 PS_WAIT_FOR_CAB |
1848 PS_WAIT_FOR_PSPOLL_DATA |
1849 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001850 }
1851
Felix Fietkau61117f02010-11-11 03:18:36 +01001852 if (unlikely(ftype))
1853 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001854 else {
1855 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001856 if (txq == sc->tx.txq_map[q]) {
1857 spin_lock_bh(&txq->axq_lock);
1858 if (WARN_ON(--txq->pending_frames < 0))
1859 txq->pending_frames = 0;
1860 spin_unlock_bh(&txq->axq_lock);
1861 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001862
Felix Fietkau827e69b2009-11-15 23:09:25 +01001863 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001864 }
Sujithe8324352009-01-16 21:38:42 +05301865}
1866
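/*
 * Unmap the buffer, hand the skb back to mac80211 (or complete the
 * PAPRD calibration frame) and return the ath_buf list to the free
 * pool.
 */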
1867static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001868 struct ath_txq *txq, struct list_head *bf_q,
1869 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301870{
1871 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301872 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301873 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301874
Sujithe8324352009-01-16 21:38:42 +05301875 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301876 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301877
1878 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301879 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301880
1881 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301882 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301883 }
1884
Ben Greearc1739eb32010-10-14 12:45:29 -07001885 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001886 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001887
1888 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001889 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001890 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001891 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001892 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001893 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001894 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001895 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1896 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001897 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001898 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1899 * accidentally reference it later.
1900 */
1901 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301902
1903 /*
1904	 * Return the list of ath_bufs of this mpdu to the free queue
1905 */
1906 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1907 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1908 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1909}
1910
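/*
 * Translate the hardware TX status into mac80211 rate control
 * feedback: ACK RSSI, A-MPDU length/ack counts and per-rate retry
 * counts.
 */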
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001911static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001912 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301913{
Sujitha22be222009-03-30 15:28:36 +05301914 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301916 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001917 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001918 struct ath_softc *sc = bf->aphy->sc;
1919 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301920 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301921
Sujith95e4acb2009-03-13 08:56:09 +05301922 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001923 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301924
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001925 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301926 WARN_ON(tx_rateindex >= hw->max_rates);
1927
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001928 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301929 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001930 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001931 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301932
Felix Fietkaub572d032010-11-14 15:20:07 +01001933 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001934
Felix Fietkaub572d032010-11-14 15:20:07 +01001935 tx_info->status.ampdu_len = nframes;
1936 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001937 }
1938
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001939 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301940 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001941 /*
1942	 * If an underrun error is seen, treat it as an excessive
1943	 * retry only if the max frame trigger level has been reached
1944	 * (2 KB for single stream, and 4 KB for dual stream).
1945	 * Adjust the long retry count as if the frame was tried
1946	 * hw->max_rate_tries times, to affect how rate control updates
1947	 * PER for the failed rate.
1948	 * When the bus is congested, penalizing these underruns
1949	 * should help the hardware actually transmit new frames
1950	 * successfully by eventually preferring slower rates.
1951	 * This in itself should also alleviate congestion on the bus.
1952 */
1953 if (ieee80211_is_data(hdr->frame_control) &&
1954 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1955 ATH9K_TX_DELIM_UNDERRUN)) &&
1956 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1957 tx_info->status.rates[tx_rateindex].count =
1958 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301959 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301960
Felix Fietkau545750d2009-11-23 22:21:01 +01001961 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301962 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001963 tx_info->status.rates[i].idx = -1;
1964 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301965
Felix Fietkau78c46532010-06-25 01:26:16 +02001966 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301967}
1968
Felix Fietkau066dae92010-11-07 14:59:39 +01001969static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
Sujith059d8062009-01-16 21:38:49 +05301970{
Felix Fietkau066dae92010-11-07 14:59:39 +01001971 struct ath_txq *txq;
Sujith059d8062009-01-16 21:38:49 +05301972
Felix Fietkau066dae92010-11-07 14:59:39 +01001973 txq = sc->tx.txq_map[qnum];
Sujith059d8062009-01-16 21:38:49 +05301974 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001975 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001976 if (ath_mac80211_start_queue(sc, qnum))
1977 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301978 }
1979 spin_unlock_bh(&txq->axq_lock);
1980}
1981
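/*
 * Process completions on a legacy (non-EDMA) TX queue: walk the
 * descriptor list, skip the stale holding descriptor, update rate
 * control state, complete each buffer and reschedule the queue.
 */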
Sujithc4288392008-11-18 09:09:30 +05301982static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001983{
Sujithcbe61d82009-02-09 13:27:12 +05301984 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001985 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001986 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1987 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301988 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001989 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05301990 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001991 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01001992 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993
Joe Perches226afe62010-12-02 19:12:37 -08001994 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1995 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1996 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001997
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001998 for (;;) {
1999 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000 if (list_empty(&txq->axq_q)) {
2001 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002002 spin_unlock_bh(&txq->axq_lock);
2003 break;
2004 }
2005 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2006
2007 /*
2008		 * There is a race condition in which a BH gets scheduled
2009		 * after sw writes TxE and before hw reloads the last
2010		 * descriptor to get the newly chained one.
2011 * Software must keep the last DONE descriptor as a
2012 * holding descriptor - software does so by marking
2013 * it with the STALE flag.
2014 */
2015 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302016 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002017 bf_held = bf;
2018 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302019 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002020 break;
2021 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002022 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302023 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002024 }
2025 }
2026
2027 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302028 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002029
Felix Fietkau29bffa92010-03-29 20:14:23 -07002030 memset(&ts, 0, sizeof(ts));
2031 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002032 if (status == -EINPROGRESS) {
2033 spin_unlock_bh(&txq->axq_lock);
2034 break;
2035 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002036
2037 /*
2038		 * Remove the ath_bufs of this transmit unit from the txq,
2039		 * but leave the last descriptor behind as the holding
2040		 * descriptor for hw.
2041 */
Sujitha119cc42009-03-30 15:28:38 +05302042 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002044 if (!list_is_singular(&lastbf->list))
2045 list_cut_position(&bf_head,
2046 &txq->axq_q, lastbf->list.prev);
2047
2048 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002049 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002050 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002051 if (bf_held)
2052 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002053
2054 if (bf_is_ampdu_not_probing(bf))
2055 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 spin_unlock_bh(&txq->axq_lock);
2057
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002058 if (bf_held)
2059 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002060
Sujithcd3d39a2008-08-11 14:03:34 +05302061 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002062 /*
2063 * This frame is sent out as a single frame.
2064 * Use hardware retry status for this frame.
2065 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002066 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302067 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002068 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069 }
Johannes Berge6a98542008-10-21 12:40:02 +02002070
Felix Fietkau066dae92010-11-07 14:59:39 +01002071 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2072
Sujithcd3d39a2008-08-11 14:03:34 +05302073 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002074 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2075 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002077 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078
Felix Fietkau066dae92010-11-07 14:59:39 +01002079 if (txq == sc->tx.txq_map[qnum])
2080 ath_wake_mac80211_queue(sc, qnum);
Sujith059d8062009-01-16 21:38:49 +05302081
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302083 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002084 ath_txq_schedule(sc, txq);
2085 spin_unlock_bh(&txq->axq_lock);
2086 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087}
2088
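/*
 * Watchdog work: if a queue still holds frames but made no progress
 * since the previous poll, assume the hardware has hung and reset the
 * chip.
 */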
Sujith305fe472009-07-23 15:32:29 +05302089static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002090{
2091 struct ath_softc *sc = container_of(work, struct ath_softc,
2092 tx_complete_work.work);
2093 struct ath_txq *txq;
2094 int i;
2095 bool needreset = false;
2096
2097 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2098 if (ATH_TXQ_SETUP(sc, i)) {
2099 txq = &sc->tx.txq[i];
2100 spin_lock_bh(&txq->axq_lock);
2101 if (txq->axq_depth) {
2102 if (txq->axq_tx_inprogress) {
2103 needreset = true;
2104 spin_unlock_bh(&txq->axq_lock);
2105 break;
2106 } else {
2107 txq->axq_tx_inprogress = true;
2108 }
2109 }
2110 spin_unlock_bh(&txq->axq_lock);
2111 }
2112
2113 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002114 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2115 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302116 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002117 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302118 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002119 }
2120
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002121 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002122 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2123}
2124
2125
Sujithe8324352009-01-16 21:38:42 +05302126
2127void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128{
Sujithe8324352009-01-16 21:38:42 +05302129 int i;
2130 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131
Sujithe8324352009-01-16 21:38:42 +05302132 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133
2134 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302135 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2136 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002137 }
2138}
2139
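/*
 * TX completion handling for EDMA (AR9003 family) hardware, where
 * completion status is read from the TX status ring rather than from
 * the frame descriptors.
 */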
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002140void ath_tx_edma_tasklet(struct ath_softc *sc)
2141{
2142 struct ath_tx_status txs;
2143 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2144 struct ath_hw *ah = sc->sc_ah;
2145 struct ath_txq *txq;
2146 struct ath_buf *bf, *lastbf;
2147 struct list_head bf_head;
2148 int status;
2149 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002150 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002151
2152 for (;;) {
2153 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2154 if (status == -EINPROGRESS)
2155 break;
2156 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002157 ath_dbg(common, ATH_DBG_XMIT,
2158 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002159 break;
2160 }
2161
2162 /* Skip beacon completions */
2163 if (txs.qid == sc->beacon.beaconq)
2164 continue;
2165
2166 txq = &sc->tx.txq[txs.qid];
2167
2168 spin_lock_bh(&txq->axq_lock);
2169 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2170 spin_unlock_bh(&txq->axq_lock);
2171 return;
2172 }
2173
2174 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2175 struct ath_buf, list);
2176 lastbf = bf->bf_lastbf;
2177
2178 INIT_LIST_HEAD(&bf_head);
2179 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2180 &lastbf->list);
2181 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2182 txq->axq_depth--;
2183 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002184 if (bf_is_ampdu_not_probing(bf))
2185 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002186 spin_unlock_bh(&txq->axq_lock);
2187
2188 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2189
2190 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002191 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2192 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002193 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002194 }
2195
Felix Fietkau066dae92010-11-07 14:59:39 +01002196 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2197
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002198 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002199 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2200 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002201 else
2202 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2203 &txs, txok, 0);
2204
Felix Fietkau066dae92010-11-07 14:59:39 +01002205 if (txq == sc->tx.txq_map[qnum])
2206 ath_wake_mac80211_queue(sc, qnum);
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002207
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002208 spin_lock_bh(&txq->axq_lock);
2209 if (!list_empty(&txq->txq_fifo_pending)) {
2210 INIT_LIST_HEAD(&bf_head);
2211 bf = list_first_entry(&txq->txq_fifo_pending,
2212 struct ath_buf, list);
2213 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2214 &bf->bf_lastbf->list);
2215 ath_tx_txqaddbuf(sc, txq, &bf_head);
2216 } else if (sc->sc_flags & SC_OP_TXAGGR)
2217 ath_txq_schedule(sc, txq);
2218 spin_unlock_bh(&txq->axq_lock);
2219 }
2220}
2221
Sujithe8324352009-01-16 21:38:42 +05302222/*****************/
2223/* Init, Cleanup */
2224/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002225
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002226static int ath_txstatus_setup(struct ath_softc *sc, int size)
2227{
2228 struct ath_descdma *dd = &sc->txsdma;
2229 u8 txs_len = sc->sc_ah->caps.txs_len;
2230
2231 dd->dd_desc_len = size * txs_len;
2232 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2233 &dd->dd_desc_paddr, GFP_KERNEL);
2234 if (!dd->dd_desc)
2235 return -ENOMEM;
2236
2237 return 0;
2238}
2239
2240static int ath_tx_edma_init(struct ath_softc *sc)
2241{
2242 int err;
2243
2244 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2245 if (!err)
2246 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2247 sc->txsdma.dd_desc_paddr,
2248 ATH_TXSTATUS_RING_SIZE);
2249
2250 return err;
2251}
2252
2253static void ath_tx_edma_cleanup(struct ath_softc *sc)
2254{
2255 struct ath_descdma *dd = &sc->txsdma;
2256
2257 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2258 dd->dd_desc_paddr);
2259}
2260
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002261int ath_tx_init(struct ath_softc *sc, int nbufs)
2262{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002263 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002264 int error = 0;
2265
Sujith797fe5cb2009-03-30 15:28:45 +05302266 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002267
Sujith797fe5cb2009-03-30 15:28:45 +05302268 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002269 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302270 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002271 ath_err(common,
2272 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302273 goto err;
2274 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002275
Sujith797fe5cb2009-03-30 15:28:45 +05302276 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002277 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302278 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002279 ath_err(common,
2280 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302281 goto err;
2282 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002283
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002284 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2285
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002286 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2287 error = ath_tx_edma_init(sc);
2288 if (error)
2289 goto err;
2290 }
2291
Sujith797fe5cb2009-03-30 15:28:45 +05302292err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002293 if (error != 0)
2294 ath_tx_cleanup(sc);
2295
2296 return error;
2297}
2298
Sujith797fe5cb2009-03-30 15:28:45 +05302299void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300{
Sujithb77f4832008-12-07 21:44:03 +05302301 if (sc->beacon.bdma.dd_desc_len != 0)
2302 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303
Sujithb77f4832008-12-07 21:44:03 +05302304 if (sc->tx.txdma.dd_desc_len != 0)
2305 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002306
2307 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2308 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002309}
2310
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002311void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2312{
Sujithc5170162008-10-29 10:13:59 +05302313 struct ath_atx_tid *tid;
2314 struct ath_atx_ac *ac;
2315 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
Sujith8ee5afb2008-12-07 21:43:36 +05302317 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302318 tidno < WME_NUM_TID;
2319 tidno++, tid++) {
2320 tid->an = an;
2321 tid->tidno = tidno;
2322 tid->seq_start = tid->seq_next = 0;
2323 tid->baw_size = WME_MAX_BA;
2324 tid->baw_head = tid->baw_tail = 0;
2325 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302326 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302327 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302328 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302329 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302330 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302331 tid->state &= ~AGGR_ADDBA_COMPLETE;
2332 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302333 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002334
Sujith8ee5afb2008-12-07 21:43:36 +05302335 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302336 acno < WME_NUM_AC; acno++, ac++) {
2337 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002338 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302339 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340 }
2341}
2342
Sujithb5aa9bf2008-10-29 10:13:31 +05302343void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344{
Felix Fietkau2b409942010-07-07 19:42:08 +02002345 struct ath_atx_ac *ac;
2346 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002348 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302349
Felix Fietkau2b409942010-07-07 19:42:08 +02002350 for (tidno = 0, tid = &an->tid[tidno];
2351 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Felix Fietkau2b409942010-07-07 19:42:08 +02002353 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002354 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355
Felix Fietkau2b409942010-07-07 19:42:08 +02002356 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357
Felix Fietkau2b409942010-07-07 19:42:08 +02002358 if (tid->sched) {
2359 list_del(&tid->list);
2360 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002361 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002362
2363 if (ac->sched) {
2364 list_del(&ac->list);
2365 tid->ac->sched = false;
2366 }
2367
2368 ath_tid_drain(sc, txq, tid);
2369 tid->state &= ~AGGR_ADDBA_COMPLETE;
2370 tid->state &= ~AGGR_CLEANUP;
2371
2372 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373 }
2374}