/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
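
/*
 * Example: with the regular 800 ns guard interval an HT OFDM symbol lasts
 * 4 us, so SYMBOL_TIME(5) = 5 << 2 = 20 us. With the short guard interval
 * a symbol lasts 3.6 us, which SYMBOL_TIME_HALFGI approximates in integer
 * math: (5 * 18 + 4) / 5 = 18 us = 5 * 3.6 us.
 */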

#define OFDM_SIFS_TIME              16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
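
/*
 * Example: the table holds data bits per 4 us OFDM symbol for one spatial
 * stream, so MCS 7 (64-QAM 5/6) on a 40 MHz channel carries
 * 540 bits / 4 us = 135 Mbps per stream, or 540 bits / 3.6 us = 150 Mbps
 * with the short guard interval; two streams double those figures.
 */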

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
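
/*
 * Example (illustrative numbers): ATH_BA_INDEX() gives the offset of a
 * sequence number from the start of the block-ack window, and cindex maps
 * that offset into the circular tx_buf bitmap. Assuming ATH_TID_MAX_BUFS
 * is 64, seq_start = 100, baw_head = 62 and seqno = 103 yield index = 3
 * and cindex = (62 + 3) & 63 = 1, i.e. the slot wraps around the bitmap.
 */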

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *       The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
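
/*
 * Example (illustrative, assumes ATH_AGGR_DELIM_SZ is 4 bytes): with an
 * MPDU density of 8 us, full GI and MCS 3 (16-QAM 1/2, one stream) on a
 * 40 MHz channel, nsymbols = 8 >> 2 = 2 and nsymbits = 216, so
 * minlen = (2 * 216) / 8 = 54 bytes. A 40-byte subframe then needs
 * mindelim = (54 - 40) / 4 = 3 extra delimiters to satisfy the density.
 */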

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts, this also permits more concurrency.
	 * The only potential downside is that if the tx queue backs
	 * up, the top half of the kernel may back up as well
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -0400972 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
973 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
974 TXQ_FLAG_TXERRINT_ENABLE;
975 } else {
976 if (qtype == ATH9K_TX_QUEUE_UAPSD)
977 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
978 else
979 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
980 TXQ_FLAG_TXDESCINT_ENABLE;
981 }
Sujithe8324352009-01-16 21:38:42 +0530982 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
983 if (qnum == -1) {
984 /*
985 * NB: don't print a message, this happens
986 * normally on parts with too few tx queues
987 */
988 return NULL;
989 }
990 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -0800991 ath_err(common, "qnum %u out of range, max %zu!\n",
992 qnum, ARRAY_SIZE(sc->tx.txq));
Sujithe8324352009-01-16 21:38:42 +0530993 ath9k_hw_releasetxqueue(ah, qnum);
994 return NULL;
995 }
996 if (!ATH_TXQ_SETUP(sc, qnum)) {
997 struct ath_txq *txq = &sc->tx.txq[qnum];
998
999 txq->axq_qnum = qnum;
1000 txq->axq_link = NULL;
1001 INIT_LIST_HEAD(&txq->axq_q);
1002 INIT_LIST_HEAD(&txq->axq_acq);
1003 spin_lock_init(&txq->axq_lock);
1004 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001005 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001006 txq->axq_tx_inprogress = false;
Sujithe8324352009-01-16 21:38:42 +05301007 sc->tx.txqsetup |= 1<<qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001008
1009 txq->txq_headidx = txq->txq_tailidx = 0;
1010 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1011 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1012 INIT_LIST_HEAD(&txq->txq_fifo_pending);
Sujithe8324352009-01-16 21:38:42 +05301013 }
1014 return &sc->tx.txq[qnum];
1015}
1016
Sujithe8324352009-01-16 21:38:42 +05301017int ath_txq_update(struct ath_softc *sc, int qnum,
1018 struct ath9k_tx_queue_info *qinfo)
1019{
Sujithcbe61d82009-02-09 13:27:12 +05301020 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301021 int error = 0;
1022 struct ath9k_tx_queue_info qi;
1023
1024 if (qnum == sc->beacon.beaconq) {
1025 /*
1026 * XXX: for beacon queue, we just save the parameter.
1027 * It will be picked up by ath_beaconq_config when
1028 * it's necessary.
1029 */
1030 sc->beacon.beacon_qi = *qinfo;
1031 return 0;
1032 }
1033
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001034 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301035
1036 ath9k_hw_get_txq_props(ah, qnum, &qi);
1037 qi.tqi_aifs = qinfo->tqi_aifs;
1038 qi.tqi_cwmin = qinfo->tqi_cwmin;
1039 qi.tqi_cwmax = qinfo->tqi_cwmax;
1040 qi.tqi_burstTime = qinfo->tqi_burstTime;
1041 qi.tqi_readyTime = qinfo->tqi_readyTime;
1042
1043 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001044 ath_err(ath9k_hw_common(sc->sc_ah),
1045 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301046 error = -EIO;
1047 } else {
1048 ath9k_hw_resettxqueue(ah, qnum);
1049 }
1050
1051 return error;
1052}
1053
1054int ath_cabq_update(struct ath_softc *sc)
1055{
1056 struct ath9k_tx_queue_info qi;
1057 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301058
1059 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1060 /*
1061 * Ensure the readytime % is within the bounds.
1062 */
Sujith17d79042009-02-09 13:27:03 +05301063 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1064 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1065 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1066 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301067
Johannes Berg57c4d7b2009-04-23 16:10:04 +02001068 qi.tqi_readyTime = (sc->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301069 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301070 ath_txq_update(sc, qnum, &qi);
1071
1072 return 0;
1073}
1074
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001075static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1076{
1077 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1078 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1079}
1080
Sujith043a0402009-01-16 21:38:47 +05301081/*
1082 * Drain a given TX queue (could be Beacon or Data)
1083 *
1084 * This assumes output has been stopped and
1085 * we do not need to block ath_tx_tasklet.
1086 */
1087void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301088{
1089 struct ath_buf *bf, *lastbf;
1090 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001091 struct ath_tx_status ts;
1092
1093 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301094 INIT_LIST_HEAD(&bf_head);
1095
Sujithe8324352009-01-16 21:38:42 +05301096 for (;;) {
1097 spin_lock_bh(&txq->axq_lock);
1098
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001099 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1100 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1101 txq->txq_headidx = txq->txq_tailidx = 0;
1102 spin_unlock_bh(&txq->axq_lock);
1103 break;
1104 } else {
1105 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1106 struct ath_buf, list);
1107 }
1108 } else {
1109 if (list_empty(&txq->axq_q)) {
1110 txq->axq_link = NULL;
1111 spin_unlock_bh(&txq->axq_lock);
1112 break;
1113 }
1114 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1115 list);
Sujithe8324352009-01-16 21:38:42 +05301116
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001117 if (bf->bf_stale) {
1118 list_del(&bf->list);
1119 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301120
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001121 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001122 continue;
1123 }
Sujithe8324352009-01-16 21:38:42 +05301124 }
1125
1126 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301127
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001128 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1129 list_cut_position(&bf_head,
1130 &txq->txq_fifo[txq->txq_tailidx],
1131 &lastbf->list);
1132 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1133 } else {
1134 /* remove ath_buf's of the same mpdu from txq */
1135 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1136 }
1137
Sujithe8324352009-01-16 21:38:42 +05301138 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001139 if (bf_is_ampdu_not_probing(bf))
1140 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301141 spin_unlock_bh(&txq->axq_lock);
1142
1143 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001144 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1145 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301146 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001147 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301148 }
1149
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001150 spin_lock_bh(&txq->axq_lock);
1151 txq->axq_tx_inprogress = false;
1152 spin_unlock_bh(&txq->axq_lock);
1153
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001154 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1155 spin_lock_bh(&txq->axq_lock);
1156 while (!list_empty(&txq->txq_fifo_pending)) {
1157 bf = list_first_entry(&txq->txq_fifo_pending,
1158 struct ath_buf, list);
1159 list_cut_position(&bf_head,
1160 &txq->txq_fifo_pending,
1161 &bf->bf_lastbf->list);
1162 spin_unlock_bh(&txq->axq_lock);
1163
1164 if (bf_isampdu(bf))
1165 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001166 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001167 else
1168 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1169 &ts, 0, 0);
1170 spin_lock_bh(&txq->axq_lock);
1171 }
1172 spin_unlock_bh(&txq->axq_lock);
1173 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001174
1175 /* flush any pending frames if aggregation is enabled */
1176 if (sc->sc_flags & SC_OP_TXAGGR) {
1177 if (!retry_tx) {
1178 spin_lock_bh(&txq->axq_lock);
1179 ath_txq_drain_pending_buffers(sc, txq);
1180 spin_unlock_bh(&txq->axq_lock);
1181 }
1182 }
Sujithe8324352009-01-16 21:38:42 +05301183}
1184
Felix Fietkau080e1a22010-12-05 20:17:53 +01001185bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301186{
Sujithcbe61d82009-02-09 13:27:12 +05301187 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001188 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301189 struct ath_txq *txq;
1190 int i, npend = 0;
1191
1192 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001193 return true;
Sujith043a0402009-01-16 21:38:47 +05301194
1195 /* Stop beacon queue */
1196 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1197
1198 /* Stop data queues */
1199 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1200 if (ATH_TXQ_SETUP(sc, i)) {
1201 txq = &sc->tx.txq[i];
1202 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1203 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1204 }
1205 }
1206
Felix Fietkau080e1a22010-12-05 20:17:53 +01001207 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001208 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301209
1210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1211 if (ATH_TXQ_SETUP(sc, i))
1212 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1213 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001214
1215 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301216}
1217
Sujithe8324352009-01-16 21:38:42 +05301218void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1219{
1220 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1221 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1222}
1223
Sujithe8324352009-01-16 21:38:42 +05301224void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1225{
1226 struct ath_atx_ac *ac;
Felix Fietkau21f28e62011-01-15 14:30:14 +01001227 struct ath_atx_tid *tid, *last;
Sujithe8324352009-01-16 21:38:42 +05301228
Felix Fietkau21f28e62011-01-15 14:30:14 +01001229 if (list_empty(&txq->axq_acq) ||
1230 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301231 return;
1232
1233 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Felix Fietkau21f28e62011-01-15 14:30:14 +01001234 last = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
Sujithe8324352009-01-16 21:38:42 +05301235 list_del(&ac->list);
1236 ac->sched = false;
1237
1238 do {
1239 if (list_empty(&ac->tid_q))
1240 return;
1241
1242 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1243 list_del(&tid->list);
1244 tid->sched = false;
1245
1246 if (tid->paused)
1247 continue;
1248
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001249 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301250
1251 /*
1252 * add tid to round-robin queue if more frames
1253 * are pending for the tid
1254 */
1255 if (!list_empty(&tid->buf_q))
1256 ath_tx_queue_tid(txq, tid);
1257
Felix Fietkau21f28e62011-01-15 14:30:14 +01001258 if (tid == last || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1259 break;
Sujithe8324352009-01-16 21:38:42 +05301260 } while (!list_empty(&ac->tid_q));
1261
1262 if (!list_empty(&ac->tid_q)) {
1263 if (!ac->sched) {
1264 ac->sched = true;
1265 list_add_tail(&ac->list, &txq->axq_acq);
1266 }
1267 }
1268}
1269
Sujithe8324352009-01-16 21:38:42 +05301270/***********/
1271/* TX, DMA */
1272/***********/
1273
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001274/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001275 * Insert a chain of ath_buf (descriptors) on a txq and
1276 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001277 */
Sujith102e0572008-10-29 10:15:16 +05301278static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1279 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001280{
Sujithcbe61d82009-02-09 13:27:12 +05301281 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001282 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001283 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301284
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001285 /*
1286 * Insert the frame on the outbound list and
1287 * pass it on to the hardware.
1288 */
1289
1290 if (list_empty(head))
1291 return;
1292
1293 bf = list_first_entry(head, struct ath_buf, list);
1294
Joe Perches226afe62010-12-02 19:12:37 -08001295 ath_dbg(common, ATH_DBG_QUEUE,
1296 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001297
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001298 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1299 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1300 list_splice_tail_init(head, &txq->txq_fifo_pending);
1301 return;
1302 }
1303 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001304 ath_dbg(common, ATH_DBG_XMIT,
1305 "Initializing tx fifo %d which is non-empty\n",
1306 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001307 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1308 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1309 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001310 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001311 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1312 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001313 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001314 list_splice_tail_init(head, &txq->axq_q);
1315
1316 if (txq->axq_link == NULL) {
1317 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001318 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1319 txq->axq_qnum, ito64(bf->bf_daddr),
1320 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001321 } else {
1322 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001323 ath_dbg(common, ATH_DBG_XMIT,
1324 "link[%u] (%p)=%llx (%p)\n",
1325 txq->axq_qnum, txq->axq_link,
1326 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001327 }
1328 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1329 &txq->axq_link);
1330 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001331 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001332 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001333 if (bf_is_ampdu_not_probing(bf))
1334 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001335}
1336
Sujithe8324352009-01-16 21:38:42 +05301337static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001338 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301339{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001340 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001341 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301342
Sujithe8324352009-01-16 21:38:42 +05301343 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301344 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301345
1346 /*
1347 * Do not queue to h/w when any of the following conditions is true:
1348 * - there are pending frames in software queue
1349 * - the TID is currently paused for ADDBA/BAR request
1350 * - seqno is not within block-ack window
1351 * - h/w queue depth exceeds low water mark
1352 */
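	/*
	 * Illustrative example (assuming BAW_WITHIN() checks that
	 * (seqno - seq_start) modulo 4096 is below baw_size): with
	 * seq_start = 100 and baw_size = 64, only sequence numbers
	 * 100..163 may go to the hardware; anything else waits in the
	 * software queue until the window advances.
	 */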
1353 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001354 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001355 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001356 /*
Sujithe8324352009-01-16 21:38:42 +05301357 * Add this frame to the software queue so it can be
 1358 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001359 */
Felix Fietkau04caf862010-11-14 15:20:12 +01001360 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301361 ath_tx_queue_tid(txctl->txq, tid);
1362 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001363 }
1364
Felix Fietkau04caf862010-11-14 15:20:12 +01001365 INIT_LIST_HEAD(&bf_head);
1366 list_add(&bf->list, &bf_head);
1367
Sujithe8324352009-01-16 21:38:42 +05301368 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001369 if (!fi->retries)
1370 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301371
1372 /* Queue to h/w without aggregation */
Sujithd43f30152009-01-16 21:38:53 +05301373 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001374 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001375 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301376}
1377
Felix Fietkau82b873a2010-11-11 03:18:37 +01001378static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1379 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001380 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001381{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001382 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301383 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001384
Sujithe8324352009-01-16 21:38:42 +05301385 bf = list_first_entry(bf_head, struct ath_buf, list);
1386 bf->bf_state.bf_type &= ~BUF_AMPDU;
1387
1388 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001389 if (tid)
1390 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301391
Sujithd43f30152009-01-16 21:38:53 +05301392 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001393 fi = get_frame_info(bf->bf_mpdu);
1394 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301395 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301396 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001397}
1398
Sujith528f0c62008-10-29 10:14:26 +05301399static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001400{
Sujith528f0c62008-10-29 10:14:26 +05301401 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001402 enum ath9k_pkt_type htype;
1403 __le16 fc;
1404
Sujith528f0c62008-10-29 10:14:26 +05301405 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001406 fc = hdr->frame_control;
1407
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001408 if (ieee80211_is_beacon(fc))
1409 htype = ATH9K_PKT_TYPE_BEACON;
1410 else if (ieee80211_is_probe_resp(fc))
1411 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1412 else if (ieee80211_is_atim(fc))
1413 htype = ATH9K_PKT_TYPE_ATIM;
1414 else if (ieee80211_is_pspoll(fc))
1415 htype = ATH9K_PKT_TYPE_PSPOLL;
1416 else
1417 htype = ATH9K_PKT_TYPE_NORMAL;
1418
1419 return htype;
1420}
1421
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001422static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1423 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301424{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001425 struct ath_wiphy *aphy = hw->priv;
1426 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301427 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001428 struct ieee80211_sta *sta = tx_info->control.sta;
1429 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301430 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001431 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301432 struct ath_node *an;
1433 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001434 enum ath9k_key_type keytype;
1435 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001436 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301437
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301439
Sujith528f0c62008-10-29 10:14:26 +05301440 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001441 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1442 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001443
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001444 an = (struct ath_node *) sta->drv_priv;
1445 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1446
1447 /*
1448 * Override seqno set by upper layer with the one
1449 * in tx aggregation state.
1450 */
1451 tid = ATH_AN_2_TID(an, tidno);
1452 seqno = tid->seq_next;
1453 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1454 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1455 }
1456
1457 memset(fi, 0, sizeof(*fi));
1458 if (hw_key)
1459 fi->keyix = hw_key->hw_key_idx;
1460 else
1461 fi->keyix = ATH9K_TXKEYIX_INVALID;
1462 fi->keytype = keytype;
1463 fi->framelen = framelen;
1464 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301465}
1466
Felix Fietkau82b873a2010-11-11 03:18:37 +01001467static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301468{
1469 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1470 int flags = 0;
1471
1472 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1473 flags |= ATH9K_TXDESC_INTREQ;
1474
1475 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1476 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301477
Felix Fietkau82b873a2010-11-11 03:18:37 +01001478 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001479 flags |= ATH9K_TXDESC_LDPC;
1480
Sujith528f0c62008-10-29 10:14:26 +05301481 return flags;
1482}
1483
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001484/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001485 * rix - rate index
1486 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1487 * width - 0 for 20 MHz, 1 for 40 MHz
 1488 * half_gi - use 3.6 us instead of 4 us symbol time (short GI)
1489 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001490static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301491 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001492{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001493 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001494 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301495
1496 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001497 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001499 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001500 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1501
1502 if (!half_gi)
1503 duration = SYMBOL_TIME(nsymbols);
1504 else
1505 duration = SYMBOL_TIME_HALFGI(nsymbols);
1506
Sujithe63835b2008-11-18 09:07:53 +05301507 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301509
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001510 return duration;
1511}
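
/*
 * Rough worked example: a 1500-byte MPDU at MCS7, 20 MHz, single stream,
 * full GI (assuming the usual 260 data bits per OFDM symbol for that rate):
 *   nbits    = 1500 * 8 + 22 PLCP bits   = 12022
 *   nsymbols = ceil(12022 / 260)         = 47
 *   duration = 47 symbols * 4 us         = 188 us
 * plus the legacy/HT preamble and training fields added above (36 us for a
 * single spatial stream).
 */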
1512
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301513u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1514{
1515 struct ath_hw *ah = sc->sc_ah;
1516 struct ath9k_channel *curchan = ah->curchan;
1517 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1518 (curchan->channelFlags & CHANNEL_5GHZ) &&
1519 (chainmask == 0x7) && (rate < 0x90))
1520 return 0x3;
1521 else
1522 return chainmask;
1523}
1524
Felix Fietkau269c44b2010-11-14 15:20:06 +01001525static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001526{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001527 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001528 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301529 struct sk_buff *skb;
1530 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301531 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001532 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301533 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301534 int i, flags = 0;
1535 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301536 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301537
1538 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301539
Sujitha22be222009-03-30 15:28:36 +05301540 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301541 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301542 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301543 hdr = (struct ieee80211_hdr *)skb->data;
1544 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301545
Sujithc89424d2009-01-30 14:29:28 +05301546 /*
1547 * We check if Short Preamble is needed for the CTS rate by
1548 * checking the BSS's global flag.
1549 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1550 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001551 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1552 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301553 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001554 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001555
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001557 bool is_40, is_sgi, is_sp;
1558 int phy;
1559
Sujithe63835b2008-11-18 09:07:53 +05301560 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001561 continue;
1562
Sujitha8efee42008-11-18 09:07:30 +05301563 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301564 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001565
Felix Fietkau27032052010-01-17 21:08:50 +01001566 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1567 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301568 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001569 flags |= ATH9K_TXDESC_RTSENA;
1570 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1571 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1572 flags |= ATH9K_TXDESC_CTSENA;
1573 }
1574
Sujithc89424d2009-01-30 14:29:28 +05301575 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1576 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1577 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1578 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001579
Felix Fietkau545750d2009-11-23 22:21:01 +01001580 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1581 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1582 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1583
1584 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1585 /* MCS rates */
1586 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301587 series[i].ChSel = ath_txchainmask_reduction(sc,
1588 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001589 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001590 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001591 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1592 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001593 continue;
1594 }
1595
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301596 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001597 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1598 !(rate->flags & IEEE80211_RATE_ERP_G))
1599 phy = WLAN_RC_PHY_CCK;
1600 else
1601 phy = WLAN_RC_PHY_OFDM;
1602
1603 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1604 series[i].Rate = rate->hw_value;
1605 if (rate->hw_value_short) {
1606 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1607 series[i].Rate |= rate->hw_value_short;
1608 } else {
1609 is_sp = false;
1610 }
1611
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301612 if (bf->bf_state.bfs_paprd)
1613 series[i].ChSel = common->tx_chainmask;
1614 else
1615 series[i].ChSel = ath_txchainmask_reduction(sc,
1616 common->tx_chainmask, series[i].Rate);
1617
Felix Fietkau545750d2009-11-23 22:21:01 +01001618 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001619 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001620 }
1621
Felix Fietkau27032052010-01-17 21:08:50 +01001622 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001623 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001624 flags &= ~ATH9K_TXDESC_RTSENA;
1625
1626 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1627 if (flags & ATH9K_TXDESC_RTSENA)
1628 flags &= ~ATH9K_TXDESC_CTSENA;
1629
Sujithe63835b2008-11-18 09:07:53 +05301630 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301631 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1632 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301633 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301634 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301635
Sujith17d79042009-02-09 13:27:03 +05301636 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301637 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638}
1639
Felix Fietkau82b873a2010-11-11 03:18:37 +01001640static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001641 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001642 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301643{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001644 struct ath_wiphy *aphy = hw->priv;
1645 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001646 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001647 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001648 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001649 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001650 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001651 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001652
1653 bf = ath_tx_get_buffer(sc);
1654 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001655 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001656 return NULL;
1657 }
Sujithe8324352009-01-16 21:38:42 +05301658
Sujithe8324352009-01-16 21:38:42 +05301659 ATH_TXBUF_RESET(bf);
1660
Felix Fietkau827e69b2009-11-15 23:09:25 +01001661 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001662 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301663 bf->bf_mpdu = skb;
1664
Ben Greearc1739eb32010-10-14 12:45:29 -07001665 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1666 skb->len, DMA_TO_DEVICE);
1667 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301668 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001669 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001670 ath_err(ath9k_hw_common(sc->sc_ah),
1671 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001672 ath_tx_return_buffer(sc, bf);
1673 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301674 }
1675
Sujithe8324352009-01-16 21:38:42 +05301676 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301677
1678 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001679 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301680
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001681 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1682 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301683
1684 ath9k_hw_filltxdesc(ah, ds,
1685 skb->len, /* segment length */
1686 true, /* first segment */
1687 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001688 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001689 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001690 txq->axq_qnum);
1691
1692
1693 return bf;
1694}
1695
1696/* FIXME: tx power */
1697static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1698 struct ath_tx_control *txctl)
1699{
1700 struct sk_buff *skb = bf->bf_mpdu;
1701 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1702 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001703 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001704 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001705 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301706
Sujithe8324352009-01-16 21:38:42 +05301707 spin_lock_bh(&txctl->txq->axq_lock);
1708
Felix Fietkau248a38d2010-12-10 21:16:46 +01001709 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001710 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1711 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001712 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001713
Felix Fietkau066dae92010-11-07 14:59:39 +01001714 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001715 }
1716
1717 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001718 /*
1719 * Try aggregation if it's a unicast data frame
1720 * and the destination is HT capable.
1721 */
1722 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301723 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001724 INIT_LIST_HEAD(&bf_head);
1725 list_add_tail(&bf->list, &bf_head);
1726
Felix Fietkau61117f02010-11-11 03:18:36 +01001727 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001728 bf->bf_state.bfs_paprd = txctl->paprd;
1729
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001730 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001731 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1732 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001733
Felix Fietkau248a38d2010-12-10 21:16:46 +01001734 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301735 }
1736
1737 spin_unlock_bh(&txctl->txq->axq_lock);
1738}
1739
1740/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001741int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301742 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001743{
Felix Fietkau28d16702010-11-14 15:20:10 +01001744 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001746 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001747 struct ath_wiphy *aphy = hw->priv;
1748 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001749 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001750 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001751 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001752 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001753 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001754
Ben Greeara9927ba2010-12-06 21:13:49 -08001755 /* NOTE: sta can be NULL according to net/mac80211.h */
1756 if (sta)
1757 txctl->an = (struct ath_node *)sta->drv_priv;
1758
Felix Fietkau04caf862010-11-14 15:20:12 +01001759 if (info->control.hw_key)
1760 frmlen += info->control.hw_key->icv_len;
1761
Felix Fietkau28d16702010-11-14 15:20:10 +01001762 /*
1763 * As a temporary workaround, assign seq# here; this will likely need
1764 * to be cleaned up to work better with Beacon transmission and virtual
1765 * BSSes.
1766 */
1767 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1768 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1769 sc->tx.seq_no += 0x10;
1770 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1771 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1772 }
1773
1774 /* Add the padding after the header if this is not already done */
1775 padpos = ath9k_cmn_padpos(hdr->frame_control);
1776 padsize = padpos & 3;
1777 if (padsize && skb->len > padpos) {
1778 if (skb_headroom(skb) < padsize)
1779 return -ENOMEM;
1780
1781 skb_push(skb, padsize);
1782 memmove(skb->data, skb->data + padsize, padpos);
1783 }
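	/*
	 * Example (illustrative; assumes ath9k_cmn_padpos() returns the
	 * 802.11 header length): a QoS data header is 26 bytes, so padpos
	 * is 26 and padsize = 26 & 3 = 2. Two bytes are pushed in front and
	 * the header is moved up, aligning the frame body on a 4-byte
	 * boundary. The same padding is stripped again in ath_tx_complete().
	 */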
1784
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001785 setup_frame_info(hw, skb, frmlen);
1786
1787 /*
1788 * At this point, the vif, hw_key and sta pointers in the tx control
 1789 * info are no longer valid (overwritten by the ath_frame_info data).
1790 */
1791
1792 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001793 if (unlikely(!bf))
1794 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001795
Felix Fietkau066dae92010-11-07 14:59:39 +01001796 q = skb_get_queue_mapping(skb);
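	/*
	 * Per-queue flow control: once more than ATH_MAX_QDEPTH frames are
	 * pending on this hardware queue, the matching mac80211 queue is
	 * stopped; ath_wake_mac80211_queue() restarts it from the completion
	 * path once pending_frames drops back below the threshold.
	 */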
Felix Fietkau97923b12010-06-12 00:33:55 -04001797 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001798 if (txq == sc->tx.txq_map[q] &&
1799 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1800 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001801 txq->stopped = 1;
1802 }
1803 spin_unlock_bh(&txq->axq_lock);
1804
Sujithe8324352009-01-16 21:38:42 +05301805 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001806
1807 return 0;
1808}
1809
Sujithe8324352009-01-16 21:38:42 +05301810/*****************/
1811/* TX Completion */
1812/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001813
Sujithe8324352009-01-16 21:38:42 +05301814static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001815 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001816 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001817{
Sujithe8324352009-01-16 21:38:42 +05301818 struct ieee80211_hw *hw = sc->hw;
1819 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001820 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001821 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001822 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301823
Joe Perches226afe62010-12-02 19:12:37 -08001824 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301825
Felix Fietkau827e69b2009-11-15 23:09:25 +01001826 if (aphy)
1827 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301828
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301829 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301830 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301831
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301832 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301833 /* Frame was ACKed */
1834 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1835 }
1836
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001837 padpos = ath9k_cmn_padpos(hdr->frame_control);
1838 padsize = padpos & 3;
1839 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301840 /*
1841 * Remove MAC header padding before giving the frame back to
1842 * mac80211.
1843 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001844 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301845 skb_pull(skb, padsize);
1846 }
1847
Sujith1b04b932010-01-08 10:36:05 +05301848 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1849 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001850 ath_dbg(common, ATH_DBG_PS,
1851 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301852 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1853 PS_WAIT_FOR_CAB |
1854 PS_WAIT_FOR_PSPOLL_DATA |
1855 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001856 }
1857
Felix Fietkau61117f02010-11-11 03:18:36 +01001858 if (unlikely(ftype))
1859 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001860 else {
1861 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001862 if (txq == sc->tx.txq_map[q]) {
1863 spin_lock_bh(&txq->axq_lock);
1864 if (WARN_ON(--txq->pending_frames < 0))
1865 txq->pending_frames = 0;
1866 spin_unlock_bh(&txq->axq_lock);
1867 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001868
Felix Fietkau827e69b2009-11-15 23:09:25 +01001869 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001870 }
Sujithe8324352009-01-16 21:38:42 +05301871}
1872
1873static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001874 struct ath_txq *txq, struct list_head *bf_q,
1875 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301876{
1877 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301878 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301879 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301880
Sujithe8324352009-01-16 21:38:42 +05301881 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301882 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301883
1884 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301885 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301886
1887 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301888 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301889 }
1890
Ben Greearc1739eb32010-10-14 12:45:29 -07001891 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001892 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001893
1894 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001895 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001896 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001897 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001898 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001899 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001900 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001901 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1902 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001903 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001904 /* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
1905 * accidentally reference it later.
1906 */
1907 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301908
1909 /*
 1910 * Return the list of ath_buf's of this mpdu to the free queue
1911 */
1912 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1913 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1914 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1915}
1916
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001917static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001918 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301919{
Sujitha22be222009-03-30 15:28:36 +05301920 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301921 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301922 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001923 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001924 struct ath_softc *sc = bf->aphy->sc;
1925 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301926 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301927
Sujith95e4acb2009-03-13 08:56:09 +05301928 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001929 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301930
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001931 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301932 WARN_ON(tx_rateindex >= hw->max_rates);
1933
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001934 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301935 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001936 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001937 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301938
Felix Fietkaub572d032010-11-14 15:20:07 +01001939 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001940
Felix Fietkaub572d032010-11-14 15:20:07 +01001941 tx_info->status.ampdu_len = nframes;
1942 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001943 }
1944
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001945 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301946 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001947 /*
1948 * If an underrun error is seen assume it as an excessive
1949 * retry only if max frame trigger level has been reached
1950 * (2 KB for single stream, and 4 KB for dual stream).
1951 * Adjust the long retry as if the frame was tried
1952 * hw->max_rate_tries times to affect how rate control updates
1953 * PER for the failed rate.
 1954 * In case of congestion on the bus, penalizing this type of
 1955 * underrun should help the hardware actually transmit new frames
1956 * successfully by eventually preferring slower rates.
1957 * This itself should also alleviate congestion on the bus.
1958 */
1959 if (ieee80211_is_data(hdr->frame_control) &&
1960 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1961 ATH9K_TX_DELIM_UNDERRUN)) &&
1962 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1963 tx_info->status.rates[tx_rateindex].count =
1964 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301965 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301966
Felix Fietkau545750d2009-11-23 22:21:01 +01001967 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301968 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001969 tx_info->status.rates[i].idx = -1;
1970 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301971
Felix Fietkau78c46532010-06-25 01:26:16 +02001972 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301973}
1974
Felix Fietkau066dae92010-11-07 14:59:39 +01001975static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
Sujith059d8062009-01-16 21:38:49 +05301976{
Felix Fietkau066dae92010-11-07 14:59:39 +01001977 struct ath_txq *txq;
Sujith059d8062009-01-16 21:38:49 +05301978
Felix Fietkau066dae92010-11-07 14:59:39 +01001979 txq = sc->tx.txq_map[qnum];
Sujith059d8062009-01-16 21:38:49 +05301980 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001981 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001982 if (ath_mac80211_start_queue(sc, qnum))
1983 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301984 }
1985 spin_unlock_bh(&txq->axq_lock);
1986}
1987
Sujithc4288392008-11-18 09:09:30 +05301988static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001989{
Sujithcbe61d82009-02-09 13:27:12 +05301990 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001991 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001992 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1993 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301994 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001995 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05301996 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001997 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01001998 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001999
Joe Perches226afe62010-12-02 19:12:37 -08002000 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2001 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2002 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002003
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004 for (;;) {
2005 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002006 if (list_empty(&txq->axq_q)) {
2007 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002008 spin_unlock_bh(&txq->axq_lock);
2009 break;
2010 }
2011 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2012
2013 /*
 2014 * There is a race condition where a BH gets scheduled
 2015 * after sw writes TxE and before hw re-loads the last
2016 * descriptor to get the newly chained one.
2017 * Software must keep the last DONE descriptor as a
2018 * holding descriptor - software does so by marking
2019 * it with the STALE flag.
2020 */
2021 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302022 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002023 bf_held = bf;
2024 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302025 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002026 break;
2027 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002028 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302029 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002030 }
2031 }
2032
2033 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302034 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002035
Felix Fietkau29bffa92010-03-29 20:14:23 -07002036 memset(&ts, 0, sizeof(ts));
2037 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002038 if (status == -EINPROGRESS) {
2039 spin_unlock_bh(&txq->axq_lock);
2040 break;
2041 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002042 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043
2044 /*
2045 * Remove ath_buf's of the same transmit unit from txq,
 2046 * but leave the last descriptor back as the holding
2047 * descriptor for hw.
2048 */
Sujitha119cc42009-03-30 15:28:38 +05302049 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002050 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002051 if (!list_is_singular(&lastbf->list))
2052 list_cut_position(&bf_head,
2053 &txq->axq_q, lastbf->list.prev);
2054
2055 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002056 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002057 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002058 if (bf_held)
2059 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002060
2061 if (bf_is_ampdu_not_probing(bf))
2062 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063 spin_unlock_bh(&txq->axq_lock);
2064
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002065 if (bf_held)
2066 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067
Sujithcd3d39a2008-08-11 14:03:34 +05302068 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069 /*
2070 * This frame is sent out as a single frame.
2071 * Use hardware retry status for this frame.
2072 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002073 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302074 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002075 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 }
Johannes Berge6a98542008-10-21 12:40:02 +02002077
Felix Fietkau066dae92010-11-07 14:59:39 +01002078 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2079
Sujithcd3d39a2008-08-11 14:03:34 +05302080 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002081 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2082 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002084 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085
Felix Fietkau066dae92010-11-07 14:59:39 +01002086 if (txq == sc->tx.txq_map[qnum])
2087 ath_wake_mac80211_queue(sc, qnum);
Sujith059d8062009-01-16 21:38:49 +05302088
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302090 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002091 ath_txq_schedule(sc, txq);
2092 spin_unlock_bh(&txq->axq_lock);
2093 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094}
2095
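/*
 * Periodic TX hang check: if a queue still holds frames and
 * axq_tx_inprogress is already set from the previous pass (i.e. no
 * completion has cleared it in the meantime), the hardware is assumed to
 * be stuck and the chip is reset. The work re-arms itself every
 * ATH_TX_COMPLETE_POLL_INT milliseconds.
 */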
Sujith305fe472009-07-23 15:32:29 +05302096static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002097{
2098 struct ath_softc *sc = container_of(work, struct ath_softc,
2099 tx_complete_work.work);
2100 struct ath_txq *txq;
2101 int i;
2102 bool needreset = false;
2103
2104 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2105 if (ATH_TXQ_SETUP(sc, i)) {
2106 txq = &sc->tx.txq[i];
2107 spin_lock_bh(&txq->axq_lock);
2108 if (txq->axq_depth) {
2109 if (txq->axq_tx_inprogress) {
2110 needreset = true;
2111 spin_unlock_bh(&txq->axq_lock);
2112 break;
2113 } else {
2114 txq->axq_tx_inprogress = true;
2115 }
2116 }
2117 spin_unlock_bh(&txq->axq_lock);
2118 }
2119
2120 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002121 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2122 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302123 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002124 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302125 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002126 }
2127
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002128 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002129 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2130}
2131
2132
Sujithe8324352009-01-16 21:38:42 +05302133
2134void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135{
Sujithe8324352009-01-16 21:38:42 +05302136 int i;
2137 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138
Sujithe8324352009-01-16 21:38:42 +05302139 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140
2141 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302142 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2143 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 }
2145}
2146
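/*
 * EDMA completion path: TX status is read from the separate status ring
 * set up in ath_tx_edma_init() rather than from the frame descriptors.
 * Beacon-queue completions are skipped, and frames parked on
 * txq_fifo_pending (added while the TX FIFO was full) are pushed out once
 * a completion frees a FIFO slot.
 */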
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002147void ath_tx_edma_tasklet(struct ath_softc *sc)
2148{
2149 struct ath_tx_status txs;
2150 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2151 struct ath_hw *ah = sc->sc_ah;
2152 struct ath_txq *txq;
2153 struct ath_buf *bf, *lastbf;
2154 struct list_head bf_head;
2155 int status;
2156 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002157 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002158
2159 for (;;) {
2160 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2161 if (status == -EINPROGRESS)
2162 break;
2163 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002164 ath_dbg(common, ATH_DBG_XMIT,
2165 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002166 break;
2167 }
2168
2169 /* Skip beacon completions */
2170 if (txs.qid == sc->beacon.beaconq)
2171 continue;
2172
2173 txq = &sc->tx.txq[txs.qid];
2174
2175 spin_lock_bh(&txq->axq_lock);
2176 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2177 spin_unlock_bh(&txq->axq_lock);
2178 return;
2179 }
2180
2181 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2182 struct ath_buf, list);
2183 lastbf = bf->bf_lastbf;
2184
2185 INIT_LIST_HEAD(&bf_head);
2186 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2187 &lastbf->list);
2188 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2189 txq->axq_depth--;
2190 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002191 if (bf_is_ampdu_not_probing(bf))
2192 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002193 spin_unlock_bh(&txq->axq_lock);
2194
2195 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2196
2197 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002198 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2199 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002200 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002201 }
2202
Felix Fietkau066dae92010-11-07 14:59:39 +01002203 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2204
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002205 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002206 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2207 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002208 else
2209 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2210 &txs, txok, 0);
2211
Felix Fietkau066dae92010-11-07 14:59:39 +01002212 if (txq == sc->tx.txq_map[qnum])
2213 ath_wake_mac80211_queue(sc, qnum);
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002214
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002215 spin_lock_bh(&txq->axq_lock);
2216 if (!list_empty(&txq->txq_fifo_pending)) {
2217 INIT_LIST_HEAD(&bf_head);
2218 bf = list_first_entry(&txq->txq_fifo_pending,
2219 struct ath_buf, list);
2220 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2221 &bf->bf_lastbf->list);
2222 ath_tx_txqaddbuf(sc, txq, &bf_head);
2223 } else if (sc->sc_flags & SC_OP_TXAGGR)
2224 ath_txq_schedule(sc, txq);
2225 spin_unlock_bh(&txq->axq_lock);
2226 }
2227}
2228
Sujithe8324352009-01-16 21:38:42 +05302229/*****************/
2230/* Init, Cleanup */
2231/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002232
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002233static int ath_txstatus_setup(struct ath_softc *sc, int size)
2234{
2235 struct ath_descdma *dd = &sc->txsdma;
2236 u8 txs_len = sc->sc_ah->caps.txs_len;
2237
2238 dd->dd_desc_len = size * txs_len;
2239 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2240 &dd->dd_desc_paddr, GFP_KERNEL);
2241 if (!dd->dd_desc)
2242 return -ENOMEM;
2243
2244 return 0;
2245}
2246
2247static int ath_tx_edma_init(struct ath_softc *sc)
2248{
2249 int err;
2250
2251 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2252 if (!err)
2253 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2254 sc->txsdma.dd_desc_paddr,
2255 ATH_TXSTATUS_RING_SIZE);
2256
2257 return err;
2258}
2259
2260static void ath_tx_edma_cleanup(struct ath_softc *sc)
2261{
2262 struct ath_descdma *dd = &sc->txsdma;
2263
2264 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2265 dd->dd_desc_paddr);
2266}
2267
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002268int ath_tx_init(struct ath_softc *sc, int nbufs)
2269{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002270 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271 int error = 0;
2272
Sujith797fe5cb2009-03-30 15:28:45 +05302273 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002274
Sujith797fe5cb2009-03-30 15:28:45 +05302275 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002276 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302277 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002278 ath_err(common,
2279 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302280 goto err;
2281 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002282
Sujith797fe5cb2009-03-30 15:28:45 +05302283 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002284 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302285 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002286 ath_err(common,
2287 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302288 goto err;
2289 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002290
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002291 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2292
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002293 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2294 error = ath_tx_edma_init(sc);
2295 if (error)
2296 goto err;
2297 }
2298
Sujith797fe5cb2009-03-30 15:28:45 +05302299err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300 if (error != 0)
2301 ath_tx_cleanup(sc);
2302
2303 return error;
2304}
2305
Sujith797fe5cb2009-03-30 15:28:45 +05302306void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002307{
Sujithb77f4832008-12-07 21:44:03 +05302308 if (sc->beacon.bdma.dd_desc_len != 0)
2309 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002310
Sujithb77f4832008-12-07 21:44:03 +05302311 if (sc->tx.txdma.dd_desc_len != 0)
2312 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002313
2314 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2315 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316}
2317
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002318void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2319{
Sujithc5170162008-10-29 10:13:59 +05302320 struct ath_atx_tid *tid;
2321 struct ath_atx_ac *ac;
2322 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002323
Sujith8ee5afb2008-12-07 21:43:36 +05302324 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302325 tidno < WME_NUM_TID;
2326 tidno++, tid++) {
2327 tid->an = an;
2328 tid->tidno = tidno;
2329 tid->seq_start = tid->seq_next = 0;
2330 tid->baw_size = WME_MAX_BA;
2331 tid->baw_head = tid->baw_tail = 0;
2332 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302333 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302334 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302335 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302336 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302337 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302338 tid->state &= ~AGGR_ADDBA_COMPLETE;
2339 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302340 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341
Sujith8ee5afb2008-12-07 21:43:36 +05302342 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302343 acno < WME_NUM_AC; acno++, ac++) {
2344 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002345 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302346 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347 }
2348}
2349
Sujithb5aa9bf2008-10-29 10:13:31 +05302350void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351{
Felix Fietkau2b409942010-07-07 19:42:08 +02002352 struct ath_atx_ac *ac;
2353 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002354 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002355 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302356
Felix Fietkau2b409942010-07-07 19:42:08 +02002357 for (tidno = 0, tid = &an->tid[tidno];
2358 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359
Felix Fietkau2b409942010-07-07 19:42:08 +02002360 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002361 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362
Felix Fietkau2b409942010-07-07 19:42:08 +02002363 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364
Felix Fietkau2b409942010-07-07 19:42:08 +02002365 if (tid->sched) {
2366 list_del(&tid->list);
2367 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002368 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002369
2370 if (ac->sched) {
2371 list_del(&ac->list);
2372 tid->ac->sched = false;
2373 }
2374
2375 ath_tid_drain(sc, txq, tid);
2376 tid->state &= ~AGGR_ADDBA_COMPLETE;
2377 tid->state &= ~AGGR_CLEANUP;
2378
2379 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380 }
2381}