/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

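/*
 * Worked example (illustrative only): an HT symbol lasts 4 us with the
 * regular guard interval and roughly 3.6 us with the short GI.  For a
 * 16 us MPDU density, NUM_SYMBOLS_PER_USEC(16) = 16 >> 2 = 4 symbols,
 * and NUM_SYMBOLS_PER_USEC_HALFGI(16) = ((16 * 5) - 4) / 18 = 4 symbols
 * (approximately 16 / 3.6, truncated by the integer math).
 */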

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
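
/*
 * For reference (not used directly below): MCS 7 at 20 MHz carries
 * 260 bits per 4 us symbol, i.e. 65 Mbit/s for a single stream, and the
 * 540 bits/symbol entry for 40 MHz corresponds to 135 Mbit/s.  Higher
 * MCS indices reuse row (rix % 8) and scale by HT_RC_2_STREAMS(rix).
 */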

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
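
/*
 * The entries above are roughly "PHY rate x 4 ms", capped at 65532.
 * For example, MCS 0 at 20 MHz (6.5 Mbit/s) can move about
 * 6500000 * 0.004 / 8 = 3250 bytes in 4 ms, which matches the 3212-byte
 * entry once a little preamble/overhead headroom is subtracted.
 */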
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
Sujithe8324352009-01-16 21:38:42 +0530123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124{
Felix Fietkau066dae92010-11-07 14:59:39 +0100125 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530126
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200127 WARN_ON(!tid->paused);
128
Sujithe8324352009-01-16 21:38:42 +0530129 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200130 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530131
132 if (list_empty(&tid->buf_q))
133 goto unlock;
134
135 ath_tx_queue_tid(txq, tid);
136 ath_txq_schedule(sc, txq);
137unlock:
138 spin_unlock_bh(&txq->axq_lock);
139}
140
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100147}
148
Sujithe8324352009-01-16 21:38:42 +0530149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{
Felix Fietkau066dae92010-11-07 14:59:39 +0100151 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530152 struct ath_buf *bf;
153 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200154 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100155 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200156
Sujithe8324352009-01-16 21:38:42 +0530157 INIT_LIST_HEAD(&bf_head);
158
Felix Fietkau90fa5392010-09-20 13:45:38 +0200159 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530160 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530161
162 while (!list_empty(&tid->buf_q)) {
163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530164 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200165
Felix Fietkaue1566d12010-11-20 03:08:46 +0100166 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau7d2c16b2011-03-12 01:11:28 +0100170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200171 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700172 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200173 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100174 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530175 }
176
177 spin_unlock_bh(&txq->axq_lock);
178}
179
180static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
181 int seqno)
182{
183 int index, cindex;
184
185 index = ATH_BA_INDEX(tid->seq_start, seqno);
186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
187
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200188 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530189
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200190 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530191 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
192 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
193 }
194}
195
196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100197 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530198{
199 int index, cindex;
200
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100201 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200203 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530204
205 if (index >= ((tid->baw_tail - tid->baw_head) &
206 (ATH_TID_MAX_BUFS - 1))) {
207 tid->baw_tail = cindex;
208 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
209 }
210}
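
/*
 * Block-ack window bookkeeping, by example: with seq_start = 100, a
 * subframe with sequence number 103 maps to index 3, i.e. bit
 * (baw_head + 3) & (ATH_TID_MAX_BUFS - 1) of tx_buf.  ath_tx_addto_baw()
 * sets that bit when the subframe is queued, ath_tx_update_baw() clears
 * it on completion and then slides seq_start/baw_head forward past any
 * leading entries that have already completed.
 */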

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is
		 * not a holding desc.
		 */
		if (!bf_last->bf_stale || bf_next != NULL)
			list_move_tail(&bf->list, &bf_head);
		else
			INIT_LIST_HEAD(&bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		list_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *       The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on a non-enterprise AR9003 card
	 */
	if (first_subfrm)
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes, based
	 * on the highest rate in the rate series (i.e. the first rate), to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
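
/*
 * Delimiter math, by example (illustrative only): with an 8 us MPDU
 * density and MCS 3 (16-QAM 1/2, single stream) at 20 MHz, full GI,
 * nsymbols = 8 >> 2 = 2 and nsymbits = 104, so minlen = 2 * 104 / 8 = 26
 * bytes.  A 20-byte subframe then needs (26 - 20) / ATH_AGGR_DELIM_SZ
 * extra delimiters on top of the length-based ATH_AGGR_GET_NDELIM()
 * value.
 */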

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!list_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!list_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and it only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1 << axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
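
/*
 * Example (illustrative): with cabqReadytime at, say, 10, the CAB queue
 * ready time programmed above is beacon_interval * 10 / 100, i.e. 10%
 * of the beacon interval is reserved for flushing buffered
 * multicast/broadcast traffic right after each beacon.
 */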
1153
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001154static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1155{
1156 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1157 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1158}
1159
Felix Fietkaufce041b2011-05-19 12:20:25 +02001160static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1161 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301162 __releases(txq->axq_lock)
1163 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301164{
1165 struct ath_buf *bf, *lastbf;
1166 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001167 struct ath_tx_status ts;
1168
1169 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301170 INIT_LIST_HEAD(&bf_head);
1171
Felix Fietkaufce041b2011-05-19 12:20:25 +02001172 while (!list_empty(list)) {
1173 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301174
Felix Fietkaufce041b2011-05-19 12:20:25 +02001175 if (bf->bf_stale) {
1176 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301177
Felix Fietkaufce041b2011-05-19 12:20:25 +02001178 ath_tx_return_buffer(sc, bf);
1179 continue;
Sujithe8324352009-01-16 21:38:42 +05301180 }
1181
1182 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001183 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001184
Sujithe8324352009-01-16 21:38:42 +05301185 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001186 if (bf_is_ampdu_not_probing(bf))
1187 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301188
Felix Fietkaufce041b2011-05-19 12:20:25 +02001189 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301190 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001191 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1192 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301193 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001194 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001195 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001196 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001197}
1198
1199/*
1200 * Drain a given TX queue (could be Beacon or Data)
1201 *
1202 * This assumes output has been stopped and
1203 * we do not need to block ath_tx_tasklet.
1204 */
1205void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1206{
1207 spin_lock_bh(&txq->axq_lock);
1208 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1209 int idx = txq->txq_tailidx;
1210
1211 while (!list_empty(&txq->txq_fifo[idx])) {
1212 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1213 retry_tx);
1214
1215 INCR(idx, ATH_TXFIFO_DEPTH);
1216 }
1217 txq->txq_tailidx = idx;
1218 }
1219
1220 txq->axq_link = NULL;
1221 txq->axq_tx_inprogress = false;
1222 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001223
1224 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001225 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1226 ath_txq_drain_pending_buffers(sc, txq);
1227
1228 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301229}
1230
Felix Fietkau080e1a22010-12-05 20:17:53 +01001231bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301232{
Sujithcbe61d82009-02-09 13:27:12 +05301233 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001234 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301235 struct ath_txq *txq;
1236 int i, npend = 0;
1237
1238 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001239 return true;
Sujith043a0402009-01-16 21:38:47 +05301240
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001241 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301242
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001243 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301244 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001245 if (!ATH_TXQ_SETUP(sc, i))
1246 continue;
1247
1248 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301249 }
1250
Felix Fietkau080e1a22010-12-05 20:17:53 +01001251 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001252 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301253
1254 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001255 if (!ATH_TXQ_SETUP(sc, i))
1256 continue;
1257
1258 /*
1259 * The caller will resume queues with ieee80211_wake_queues.
1260 * Mark the queue as not stopped to prevent ath_tx_complete
1261 * from waking the queue too early.
1262 */
1263 txq = &sc->tx.txq[i];
1264 txq->stopped = false;
1265 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301266 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001267
1268 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301269}
1270
Sujithe8324352009-01-16 21:38:42 +05301271void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1272{
1273 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1274 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1275}
1276
Ben Greear7755bad2011-01-18 17:30:00 -08001277/* For each axq_acq entry, for each tid, try to schedule packets
1278 * for transmit until ampdu_depth has reached min Q depth.
1279 */
Sujithe8324352009-01-16 21:38:42 +05301280void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1281{
Ben Greear7755bad2011-01-18 17:30:00 -08001282 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1283 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301284
Felix Fietkau21f28e62011-01-15 14:30:14 +01001285 if (list_empty(&txq->axq_acq) ||
1286 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301287 return;
1288
1289 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001290 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301291
Ben Greear7755bad2011-01-18 17:30:00 -08001292 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1293 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1294 list_del(&ac->list);
1295 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301296
Ben Greear7755bad2011-01-18 17:30:00 -08001297 while (!list_empty(&ac->tid_q)) {
1298 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1299 list);
1300 list_del(&tid->list);
1301 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301302
Ben Greear7755bad2011-01-18 17:30:00 -08001303 if (tid->paused)
1304 continue;
Sujithe8324352009-01-16 21:38:42 +05301305
Ben Greear7755bad2011-01-18 17:30:00 -08001306 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301307
Ben Greear7755bad2011-01-18 17:30:00 -08001308 /*
1309 * add tid to round-robin queue if more frames
1310 * are pending for the tid
1311 */
1312 if (!list_empty(&tid->buf_q))
1313 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301314
Ben Greear7755bad2011-01-18 17:30:00 -08001315 if (tid == last_tid ||
1316 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1317 break;
Sujithe8324352009-01-16 21:38:42 +05301318 }
Ben Greear7755bad2011-01-18 17:30:00 -08001319
1320 if (!list_empty(&ac->tid_q)) {
1321 if (!ac->sched) {
1322 ac->sched = true;
1323 list_add_tail(&ac->list, &txq->axq_acq);
1324 }
1325 }
1326
1327 if (ac == last_ac ||
1328 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1329 return;
Sujithe8324352009-01-16 21:38:42 +05301330 }
1331}
1332
Sujithe8324352009-01-16 21:38:42 +05301333/***********/
1334/* TX, DMA */
1335/***********/
1336
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001337/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001338 * Insert a chain of ath_buf (descriptors) on a txq; the caller must
 1339 * have already chained the descriptors together.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001340 */
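/*
 * Two hardware paths are handled below: on EDMA chips
 * (ATH9K_HW_CAP_EDMA) the chain goes into a free txq_fifo[] slot and is
 * handed to the hardware with ath9k_hw_puttxbuf(); if no slot is free,
 * it is parked on axq_q until the EDMA completion path pushes it.  On
 * older chips the chain is appended to axq_q, linked to the previous
 * descriptor through axq_link (or programmed with ath9k_hw_puttxbuf()
 * when the queue was idle), and started with ath9k_hw_txstart().
 */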
Sujith102e0572008-10-29 10:15:16 +05301341static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001342 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001343{
Sujithcbe61d82009-02-09 13:27:12 +05301344 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001345 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001346 struct ath_buf *bf, *bf_last;
1347 bool puttxbuf = false;
1348 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301349
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001350 /*
1351 * Insert the frame on the outbound list and
1352 * pass it on to the hardware.
1353 */
1354
1355 if (list_empty(head))
1356 return;
1357
Felix Fietkaufce041b2011-05-19 12:20:25 +02001358 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001359 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001360 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001361
Joe Perches226afe62010-12-02 19:12:37 -08001362 ath_dbg(common, ATH_DBG_QUEUE,
1363 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001364
Felix Fietkaufce041b2011-05-19 12:20:25 +02001365 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1366 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001367 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001368 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001369 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001370 list_splice_tail_init(head, &txq->axq_q);
1371
Felix Fietkaufce041b2011-05-19 12:20:25 +02001372 if (txq->axq_link) {
1373 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001374 ath_dbg(common, ATH_DBG_XMIT,
1375 "link[%u] (%p)=%llx (%p)\n",
1376 txq->axq_qnum, txq->axq_link,
1377 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001378 } else if (!edma)
1379 puttxbuf = true;
1380
1381 txq->axq_link = bf_last->bf_desc;
1382 }
1383
1384 if (puttxbuf) {
1385 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1386 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1387 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1388 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1389 }
1390
1391 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001392 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001393 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001395
1396 if (!internal) {
1397 txq->axq_depth++;
1398 if (bf_is_ampdu_not_probing(bf))
1399 txq->axq_ampdu_depth++;
1400 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001401}
1402
Sujithe8324352009-01-16 21:38:42 +05301403static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001404 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301405{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001406 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001407 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301408
Sujithe8324352009-01-16 21:38:42 +05301409 bf->bf_state.bf_type |= BUF_AMPDU;
1410
1411 /*
1412 * Do not queue to h/w when any of the following conditions is true:
1413 * - there are pending frames in software queue
1414 * - the TID is currently paused for ADDBA/BAR request
1415 * - seqno is not within block-ack window
1416 * - h/w queue depth exceeds low water mark
1417 */
1418 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001419 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001420 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001421 /*
Sujithe8324352009-01-16 21:38:42 +05301422 * Add this frame to the software queue so it can be
 1423 * scheduled for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001424 */
Ben Greearbda8add2011-01-09 23:11:48 -08001425 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001426 list_add_tail(&bf->list, &tid->buf_q);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001427 if (!txctl->an || !txctl->an->sleeping)
1428 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301429 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001430 }
1431
Felix Fietkau04caf862010-11-14 15:20:12 +01001432 INIT_LIST_HEAD(&bf_head);
1433 list_add(&bf->list, &bf_head);
1434
Sujithe8324352009-01-16 21:38:42 +05301435 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001436 if (!fi->retries)
1437 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301438
1439 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001440 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301441 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001442 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001443 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301444}
1445
Felix Fietkau82b873a2010-11-11 03:18:37 +01001446static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1447 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001448 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001449{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001450 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301451 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001452
Sujithe8324352009-01-16 21:38:42 +05301453 bf = list_first_entry(bf_head, struct ath_buf, list);
1454 bf->bf_state.bf_type &= ~BUF_AMPDU;
1455
1456 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001457 if (tid)
1458 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301459
Sujithd43f30152009-01-16 21:38:53 +05301460 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001461 fi = get_frame_info(bf->bf_mpdu);
1462 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001463 ath_tx_txqaddbuf(sc, txq, bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301464 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001465}
1466
Sujith528f0c62008-10-29 10:14:26 +05301467static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001468{
Sujith528f0c62008-10-29 10:14:26 +05301469 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001470 enum ath9k_pkt_type htype;
1471 __le16 fc;
1472
Sujith528f0c62008-10-29 10:14:26 +05301473 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001474 fc = hdr->frame_control;
1475
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001476 if (ieee80211_is_beacon(fc))
1477 htype = ATH9K_PKT_TYPE_BEACON;
1478 else if (ieee80211_is_probe_resp(fc))
1479 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1480 else if (ieee80211_is_atim(fc))
1481 htype = ATH9K_PKT_TYPE_ATIM;
1482 else if (ieee80211_is_pspoll(fc))
1483 htype = ATH9K_PKT_TYPE_PSPOLL;
1484 else
1485 htype = ATH9K_PKT_TYPE_NORMAL;
1486
1487 return htype;
1488}
1489
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001490static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1491 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301492{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001493 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301494 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001495 struct ieee80211_sta *sta = tx_info->control.sta;
1496 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301497 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001498 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001499 struct ath_node *an = NULL;
Sujith528f0c62008-10-29 10:14:26 +05301500 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001501 enum ath9k_key_type keytype;
1502 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001503 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301504
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001505 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301506
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001507 if (sta)
1508 an = (struct ath_node *) sta->drv_priv;
1509
Sujith528f0c62008-10-29 10:14:26 +05301510 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001511 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001512 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001514 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1515
1516 /*
1517 * Override seqno set by upper layer with the one
1518 * in tx aggregation state.
1519 */
1520 tid = ATH_AN_2_TID(an, tidno);
1521 seqno = tid->seq_next;
1522 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1523 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1524 }
1525
1526 memset(fi, 0, sizeof(*fi));
1527 if (hw_key)
1528 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001529 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1530 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001531 else
1532 fi->keyix = ATH9K_TXKEYIX_INVALID;
1533 fi->keytype = keytype;
1534 fi->framelen = framelen;
1535 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301536}
1537
Felix Fietkau82b873a2010-11-11 03:18:37 +01001538static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301539{
1540 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1541 int flags = 0;
1542
Sujith528f0c62008-10-29 10:14:26 +05301543 flags |= ATH9K_TXDESC_INTREQ;
1544
1545 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1546 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301547
Felix Fietkau82b873a2010-11-11 03:18:37 +01001548 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001549 flags |= ATH9K_TXDESC_LDPC;
1550
Sujith528f0c62008-10-29 10:14:26 +05301551 return flags;
1552}
1553
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001554/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001555 * rix - rate index
1556 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1557 * width - 0 for 20 MHz, 1 for 40 MHz
 1558 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1559 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001560static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301561 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001562{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001563 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001564 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301565
1566 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001567 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001569 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001570 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1571
1572 if (!half_gi)
1573 duration = SYMBOL_TIME(nsymbols);
1574 else
1575 duration = SYMBOL_TIME_HALFGI(nsymbols);
1576
Sujithe63835b2008-11-18 09:07:53 +05301577 /* add up the duration of the legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001578 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301579
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580 return duration;
1581}
1582
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301583u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1584{
1585 struct ath_hw *ah = sc->sc_ah;
1586 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301587 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1588 (curchan->channelFlags & CHANNEL_5GHZ) &&
1589 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301590 return 0x3;
1591 else
1592 return chainmask;
1593}
1594
Felix Fietkau269c44b2010-11-14 15:20:06 +01001595static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001596{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001597 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001598 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301599 struct sk_buff *skb;
1600 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301601 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001602 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301603 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301604 int i, flags = 0;
1605 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301606 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301607
1608 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301609
Sujitha22be222009-03-30 15:28:36 +05301610 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301611 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301612 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301613 hdr = (struct ieee80211_hdr *)skb->data;
1614 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301615
Sujithc89424d2009-01-30 14:29:28 +05301616 /*
 1617 * Whether short preamble is needed for the CTS rate is determined
 1618 * from the BSS's global flag, while the rate series themselves use
 1619 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE.
1620 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001621 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1622 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301623 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001624 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001625
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001626 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001627 bool is_40, is_sgi, is_sp;
1628 int phy;
1629
Sujithe63835b2008-11-18 09:07:53 +05301630 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631 continue;
1632
Sujitha8efee42008-11-18 09:07:30 +05301633 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301634 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001635
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301636 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301637 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001638 flags |= ATH9K_TXDESC_RTSENA;
1639 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1640 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1641 flags |= ATH9K_TXDESC_CTSENA;
1642 }
1643
Sujithc89424d2009-01-30 14:29:28 +05301644 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1645 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1646 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1647 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001648
Felix Fietkau545750d2009-11-23 22:21:01 +01001649 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1650 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1651 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1652
1653 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1654 /* MCS rates */
1655 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301656 series[i].ChSel = ath_txchainmask_reduction(sc,
1657 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001658 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001659 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001660 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1661 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001662 continue;
1663 }
1664
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301665 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001666 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1667 !(rate->flags & IEEE80211_RATE_ERP_G))
1668 phy = WLAN_RC_PHY_CCK;
1669 else
1670 phy = WLAN_RC_PHY_OFDM;
1671
1672 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1673 series[i].Rate = rate->hw_value;
1674 if (rate->hw_value_short) {
1675 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1676 series[i].Rate |= rate->hw_value_short;
1677 } else {
1678 is_sp = false;
1679 }
1680
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301681 if (bf->bf_state.bfs_paprd)
1682 series[i].ChSel = common->tx_chainmask;
1683 else
1684 series[i].ChSel = ath_txchainmask_reduction(sc,
1685 common->tx_chainmask, series[i].Rate);
1686
Felix Fietkau545750d2009-11-23 22:21:01 +01001687 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001688 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001689 }
1690
Felix Fietkau27032052010-01-17 21:08:50 +01001691 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001692 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001693 flags &= ~ATH9K_TXDESC_RTSENA;
1694
1695 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1696 if (flags & ATH9K_TXDESC_RTSENA)
1697 flags &= ~ATH9K_TXDESC_CTSENA;
1698
Sujithe63835b2008-11-18 09:07:53 +05301699 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301700 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1701 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301702 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301703 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301704
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001705}
1706
Felix Fietkau82b873a2010-11-11 03:18:37 +01001707static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001708 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001709 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301710{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001711 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001712 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001713 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001714 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001715 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001716 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001717 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001718
1719 bf = ath_tx_get_buffer(sc);
1720 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001721 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001722 return NULL;
1723 }
Sujithe8324352009-01-16 21:38:42 +05301724
Sujithe8324352009-01-16 21:38:42 +05301725 ATH_TXBUF_RESET(bf);
1726
Felix Fietkau82b873a2010-11-11 03:18:37 +01001727 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301728 bf->bf_mpdu = skb;
1729
Ben Greearc1739eb32010-10-14 12:45:29 -07001730 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1731 skb->len, DMA_TO_DEVICE);
1732 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301733 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001734 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001735 ath_err(ath9k_hw_common(sc->sc_ah),
1736 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001737 ath_tx_return_buffer(sc, bf);
1738 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301739 }
1740
Sujithe8324352009-01-16 21:38:42 +05301741 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301742
1743 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001744 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301745
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001746 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1747 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301748
1749 ath9k_hw_filltxdesc(ah, ds,
1750 skb->len, /* segment length */
1751 true, /* first segment */
1752 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001753 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001754 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001755 txq->axq_qnum);
1756
1757
1758 return bf;
1759}
1760
1761/* FIXME: tx power */
1762static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1763 struct ath_tx_control *txctl)
1764{
1765 struct sk_buff *skb = bf->bf_mpdu;
1766 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001768 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001769 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001770 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301771
Sujithe8324352009-01-16 21:38:42 +05301772 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301773 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1774 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001775 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1776 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001777 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001778
Felix Fietkau066dae92010-11-07 14:59:39 +01001779 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001780 }
1781
1782 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001783 /*
1784 * Try aggregation if it's a unicast data frame
1785 * and the destination is HT capable.
1786 */
1787 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301788 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001789 INIT_LIST_HEAD(&bf_head);
1790 list_add_tail(&bf->list, &bf_head);
1791
Felix Fietkau82b873a2010-11-11 03:18:37 +01001792 bf->bf_state.bfs_paprd = txctl->paprd;
1793
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001794 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001795 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1796 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001797
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301798 if (txctl->paprd)
1799 bf->bf_state.bfs_paprd_timestamp = jiffies;
1800
Felix Fietkau55195412011-04-17 23:28:09 +02001801 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1802 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1803
Felix Fietkau248a38d2010-12-10 21:16:46 +01001804 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301805 }
1806
1807 spin_unlock_bh(&txctl->txq->axq_lock);
1808}
1809
1810/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001811int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301812 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001813{
Felix Fietkau28d16702010-11-14 15:20:10 +01001814 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1815 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001816 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001817 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001818 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001819 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001820 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001821 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001822 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001823 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001824
Ben Greeara9927ba2010-12-06 21:13:49 -08001825 /* NOTE: sta can be NULL according to net/mac80211.h */
1826 if (sta)
1827 txctl->an = (struct ath_node *)sta->drv_priv;
1828
Felix Fietkau04caf862010-11-14 15:20:12 +01001829 if (info->control.hw_key)
1830 frmlen += info->control.hw_key->icv_len;
1831
Felix Fietkau28d16702010-11-14 15:20:10 +01001832 /*
1833 * As a temporary workaround, assign seq# here; this will likely need
1834 * to be cleaned up to work better with Beacon transmission and virtual
1835 * BSSes.
1836 */
1837 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1838 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1839 sc->tx.seq_no += 0x10;
1840 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1841 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1842 }
1843
1844 /* Add the padding after the header if this is not already done */
1845 padpos = ath9k_cmn_padpos(hdr->frame_control);
1846 padsize = padpos & 3;
1847 if (padsize && skb->len > padpos) {
1848 if (skb_headroom(skb) < padsize)
1849 return -ENOMEM;
1850
1851 skb_push(skb, padsize);
1852 memmove(skb->data, skb->data + padsize, padpos);
1853 }
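 /*
  * For illustration: a typical QoS data header is 26 bytes, so
  * padpos = 26 and padsize = 2; two bytes are pushed in front of the
  * skb and the header is moved up, leaving the padding bytes between
  * the header and the payload so that the payload starts on a 4-byte
  * boundary.
  */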
1854
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001855 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1856 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1857 !ieee80211_is_data(hdr->frame_control))
1858 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1859
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001860 setup_frame_info(hw, skb, frmlen);
1861
1862 /*
1863 * At this point, the vif, hw_key and sta pointers in the tx control
 1864 * info are no longer valid (overwritten by the ath_frame_info data).
1865 */
1866
1867 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001868 if (unlikely(!bf))
1869 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001870
Felix Fietkau066dae92010-11-07 14:59:39 +01001871 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001872 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001873 if (txq == sc->tx.txq_map[q] &&
1874 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001875 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001876 txq->stopped = 1;
1877 }
1878 spin_unlock_bh(&txq->axq_lock);
1879
Sujithe8324352009-01-16 21:38:42 +05301880 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001881
1882 return 0;
1883}
1884
Sujithe8324352009-01-16 21:38:42 +05301885/*****************/
1886/* TX Completion */
1887/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001888
Sujithe8324352009-01-16 21:38:42 +05301889static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301890 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001891{
Sujithe8324352009-01-16 21:38:42 +05301892 struct ieee80211_hw *hw = sc->hw;
1893 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001894 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001895 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001896 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301897
Joe Perches226afe62010-12-02 19:12:37 -08001898 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301899
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301900 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301901 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301902
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301903 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301904 /* Frame was ACKed */
1905 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1906 }
1907
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001908 padpos = ath9k_cmn_padpos(hdr->frame_control);
1909 padsize = padpos & 3;
 1910 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301911 /*
1912 * Remove MAC header padding before giving the frame back to
1913 * mac80211.
1914 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001915 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301916 skb_pull(skb, padsize);
1917 }
1918
Sujith1b04b932010-01-08 10:36:05 +05301919 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1920 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001921 ath_dbg(common, ATH_DBG_PS,
1922 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301923 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1924 PS_WAIT_FOR_CAB |
1925 PS_WAIT_FOR_PSPOLL_DATA |
1926 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001927 }
1928
Felix Fietkau7545daf2011-01-24 19:23:16 +01001929 q = skb_get_queue_mapping(skb);
1930 if (txq == sc->tx.txq_map[q]) {
1931 spin_lock_bh(&txq->axq_lock);
1932 if (WARN_ON(--txq->pending_frames < 0))
1933 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001934
Felix Fietkau7545daf2011-01-24 19:23:16 +01001935 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1936 ieee80211_wake_queue(sc->hw, q);
1937 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001938 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001939 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001940 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001941
1942 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301943}
1944
1945static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001946 struct ath_txq *txq, struct list_head *bf_q,
1947 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301948{
1949 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301950 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301951 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301952
Sujithe8324352009-01-16 21:38:42 +05301953 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301954 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301955
1956 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301957 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301958
1959 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301960 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301961 }
1962
Ben Greearc1739eb32010-10-14 12:45:29 -07001963 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001964 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001965
1966 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301967 if (time_after(jiffies,
1968 bf->bf_state.bfs_paprd_timestamp +
1969 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001970 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001971 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001972 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001973 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001974 ath_debug_stat_tx(sc, bf, ts, txq);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301975 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001976 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001977 /* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
1978 * accidentally reference it later.
1979 */
1980 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301981
1982 /*
 1983 * Return this mpdu's list of ath_buf entries to the free queue
1984 */
1985 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1986 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1987 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1988}
1989
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001990static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1991 struct ath_tx_status *ts, int nframes, int nbad,
1992 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301993{
Sujitha22be222009-03-30 15:28:36 +05301994 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301995 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301996 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001997 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001998 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301999 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302000
Sujith95e4acb2009-03-13 08:56:09 +05302001 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002002 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302003
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002004 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302005 WARN_ON(tx_rateindex >= hw->max_rates);
2006
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002007 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302008 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002009 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002010 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302011
Felix Fietkaub572d032010-11-14 15:20:07 +01002012 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002013
Felix Fietkaub572d032010-11-14 15:20:07 +01002014 tx_info->status.ampdu_len = nframes;
2015 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002016 }
2017
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002018 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302019 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002020 /*
 2021 * If an underrun error is seen, treat it as an excessive
 2022 * retry only if the maximum frame trigger level has been
 2023 * reached (2 KB for single stream, 4 KB for dual stream).
 2024 * Adjust the long retry count as if the frame had been tried
 2025 * hw->max_rate_tries times, which affects how rate control
 2026 * updates the PER for the failed rate.
 2027 * When the bus is congested, penalizing this kind of underrun
 2028 * should help the hardware actually transmit new frames
 2029 * successfully by eventually preferring slower rates, which
 2030 * in turn should also alleviate the congestion on the bus.
2031 */
2032 if (ieee80211_is_data(hdr->frame_control) &&
2033 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2034 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002035 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002036 tx_info->status.rates[tx_rateindex].count =
2037 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302038 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302039
Felix Fietkau545750d2009-11-23 22:21:01 +01002040 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302041 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002042 tx_info->status.rates[i].idx = -1;
2043 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302044
Felix Fietkau78c46532010-06-25 01:26:16 +02002045 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302046}
2047
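/*
 * Complete one transmit unit: update the queue depth counters, drop
 * axq_lock while handing the frame(s) back (single frames use the
 * hardware retry status through ath_tx_rc_status() and
 * ath_tx_complete_buf(), aggregates go through ath_tx_complete_aggr()),
 * then re-acquire the lock and kick the scheduler when aggregation is
 * enabled.
 */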
Felix Fietkaufce041b2011-05-19 12:20:25 +02002048static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2049 struct ath_tx_status *ts, struct ath_buf *bf,
2050 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302051 __releases(txq->axq_lock)
2052 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002053{
2054 int txok;
2055
2056 txq->axq_depth--;
2057 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2058 txq->axq_tx_inprogress = false;
2059 if (bf_is_ampdu_not_probing(bf))
2060 txq->axq_ampdu_depth--;
2061
2062 spin_unlock_bh(&txq->axq_lock);
2063
2064 if (!bf_isampdu(bf)) {
2065 /*
 2066 * This frame was sent out as a single (non-aggregated) frame,
 2067 * so use the hardware retry status for it.
2068 */
2069 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2070 bf->bf_state.bf_type |= BUF_XRETRY;
2071 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2072 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2073 } else
2074 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2075
2076 spin_lock_bh(&txq->axq_lock);
2077
2078 if (sc->sc_flags & SC_OP_TXAGGR)
2079 ath_txq_schedule(sc, txq);
2080}
2081
Sujithc4288392008-11-18 09:09:30 +05302082static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083{
Sujithcbe61d82009-02-09 13:27:12 +05302084 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002085 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2087 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302088 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002089 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 int status;
2091
Joe Perches226afe62010-12-02 19:12:37 -08002092 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2093 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2094 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095
Felix Fietkaufce041b2011-05-19 12:20:25 +02002096 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002097 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002098 if (list_empty(&txq->axq_q)) {
2099 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002100 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002101 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002102 break;
2103 }
2104 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2105
2106 /*
 2107 * There is a race condition where the BH gets scheduled
 2108 * after software writes TxE but before the hardware reloads
 2109 * the last descriptor to pick up the newly chained one.
 2110 * Software must keep the last DONE descriptor as a holding
 2111 * descriptor, which it does by marking it with the STALE
 2112 * flag.
2113 */
2114 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302115 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002117 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002118 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002119
2120 bf = list_entry(bf_held->list.next, struct ath_buf,
2121 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122 }
2123
2124 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302125 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002126
Felix Fietkau29bffa92010-03-29 20:14:23 -07002127 memset(&ts, 0, sizeof(ts));
2128 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002129 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002130 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002131
Ben Greear2dac4fb2011-01-09 23:11:45 -08002132 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133
2134 /*
 2135 * Remove the ath_bufs of the same transmit unit from the txq,
 2136 * but leave the last descriptor behind as the holding
 2137 * descriptor for the hardware.
2138 */
Sujitha119cc42009-03-30 15:28:38 +05302139 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141 if (!list_is_singular(&lastbf->list))
2142 list_cut_position(&bf_head,
2143 &txq->axq_q, lastbf->list.prev);
2144
Felix Fietkaufce041b2011-05-19 12:20:25 +02002145 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002146 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002147 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 }
Johannes Berge6a98542008-10-21 12:40:02 +02002149
Felix Fietkaufce041b2011-05-19 12:20:25 +02002150 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002152 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153}
2154
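/*
 * Periodic TX watchdog: every ATH_TX_COMPLETE_POLL_INT ms, check each
 * active queue.  If a queue still has descriptors pending and its
 * axq_tx_inprogress flag was already set on the previous pass (i.e. no
 * completion has cleared it in the meantime), the TX path is assumed to
 * be hung and the chip is reset.
 */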
Sujith305fe472009-07-23 15:32:29 +05302155static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002156{
2157 struct ath_softc *sc = container_of(work, struct ath_softc,
2158 tx_complete_work.work);
2159 struct ath_txq *txq;
2160 int i;
2161 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002162#ifdef CONFIG_ATH9K_DEBUGFS
2163 sc->tx_complete_poll_work_seen++;
2164#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002165
2166 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2167 if (ATH_TXQ_SETUP(sc, i)) {
2168 txq = &sc->tx.txq[i];
2169 spin_lock_bh(&txq->axq_lock);
2170 if (txq->axq_depth) {
2171 if (txq->axq_tx_inprogress) {
2172 needreset = true;
2173 spin_unlock_bh(&txq->axq_lock);
2174 break;
2175 } else {
2176 txq->axq_tx_inprogress = true;
2177 }
2178 }
2179 spin_unlock_bh(&txq->axq_lock);
2180 }
2181
2182 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002183 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2184 "tx hung, resetting the chip\n");
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302185 spin_lock_bh(&sc->sc_pcu_lock);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002186 ath_reset(sc, true);
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302187 spin_unlock_bh(&sc->sc_pcu_lock);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002188 }
2189
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002190 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002191 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2192}
2193
2194
Sujithe8324352009-01-16 21:38:42 +05302195
2196void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002197{
Sujithe8324352009-01-16 21:38:42 +05302198 int i;
2199 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200
Sujithe8324352009-01-16 21:38:42 +05302201 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002202
2203 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302204 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2205 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002206 }
2207}
2208
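/*
 * EDMA (e.g. AR9003-based) completion path: TX status entries are read
 * from the status ring via ath9k_hw_txprocdesc(), the queue is
 * identified by ts.qid, and completed buffers are pulled from the head
 * of txq_fifo[].  When a FIFO slot drains, any frames that were parked
 * on axq_q are spliced back in through ath_tx_txqaddbuf().
 */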
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002209void ath_tx_edma_tasklet(struct ath_softc *sc)
2210{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002211 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002212 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2213 struct ath_hw *ah = sc->sc_ah;
2214 struct ath_txq *txq;
2215 struct ath_buf *bf, *lastbf;
2216 struct list_head bf_head;
2217 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002218
2219 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002220 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002221 if (status == -EINPROGRESS)
2222 break;
2223 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002224 ath_dbg(common, ATH_DBG_XMIT,
2225 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002226 break;
2227 }
2228
2229 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002230 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002231 continue;
2232
Felix Fietkaufce041b2011-05-19 12:20:25 +02002233 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002234
2235 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002236
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2238 spin_unlock_bh(&txq->axq_lock);
2239 return;
2240 }
2241
2242 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2243 struct ath_buf, list);
2244 lastbf = bf->bf_lastbf;
2245
2246 INIT_LIST_HEAD(&bf_head);
2247 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2248 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249
Felix Fietkaufce041b2011-05-19 12:20:25 +02002250 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2251 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002252
Felix Fietkaufce041b2011-05-19 12:20:25 +02002253 if (!list_empty(&txq->axq_q)) {
2254 struct list_head bf_q;
2255
2256 INIT_LIST_HEAD(&bf_q);
2257 txq->axq_link = NULL;
2258 list_splice_tail_init(&txq->axq_q, &bf_q);
2259 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2260 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261 }
2262
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 spin_unlock_bh(&txq->axq_lock);
2265 }
2266}
2267
Sujithe8324352009-01-16 21:38:42 +05302268/*****************/
2269/* Init, Cleanup */
2270/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002272static int ath_txstatus_setup(struct ath_softc *sc, int size)
2273{
2274 struct ath_descdma *dd = &sc->txsdma;
2275 u8 txs_len = sc->sc_ah->caps.txs_len;
2276
2277 dd->dd_desc_len = size * txs_len;
2278 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2279 &dd->dd_desc_paddr, GFP_KERNEL);
2280 if (!dd->dd_desc)
2281 return -ENOMEM;
2282
2283 return 0;
2284}
2285
2286static int ath_tx_edma_init(struct ath_softc *sc)
2287{
2288 int err;
2289
2290 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2291 if (!err)
2292 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2293 sc->txsdma.dd_desc_paddr,
2294 ATH_TXSTATUS_RING_SIZE);
2295
2296 return err;
2297}
2298
2299static void ath_tx_edma_cleanup(struct ath_softc *sc)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302
2303 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2304 dd->dd_desc_paddr);
2305}
2306
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002307int ath_tx_init(struct ath_softc *sc, int nbufs)
2308{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002309 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002310 int error = 0;
2311
Sujith797fe5cb2009-03-30 15:28:45 +05302312 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313
Sujith797fe5cb2009-03-30 15:28:45 +05302314 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002315 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302316 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002317 ath_err(common,
2318 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302319 goto err;
2320 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002321
Sujith797fe5cb2009-03-30 15:28:45 +05302322 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002323 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302324 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002325 ath_err(common,
2326 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302327 goto err;
2328 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002329
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002330 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2331
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002332 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2333 error = ath_tx_edma_init(sc);
2334 if (error)
2335 goto err;
2336 }
2337
Sujith797fe5cb2009-03-30 15:28:45 +05302338err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339 if (error != 0)
2340 ath_tx_cleanup(sc);
2341
2342 return error;
2343}
2344
Sujith797fe5cb2009-03-30 15:28:45 +05302345void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002346{
Sujithb77f4832008-12-07 21:44:03 +05302347 if (sc->beacon.bdma.dd_desc_len != 0)
2348 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
Sujithb77f4832008-12-07 21:44:03 +05302350 if (sc->tx.txdma.dd_desc_len != 0)
2351 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002352
2353 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2354 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355}
2356
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2358{
Sujithc5170162008-10-29 10:13:59 +05302359 struct ath_atx_tid *tid;
2360 struct ath_atx_ac *ac;
2361 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362
Sujith8ee5afb2008-12-07 21:43:36 +05302363 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302364 tidno < WME_NUM_TID;
2365 tidno++, tid++) {
2366 tid->an = an;
2367 tid->tidno = tidno;
2368 tid->seq_start = tid->seq_next = 0;
2369 tid->baw_size = WME_MAX_BA;
2370 tid->baw_head = tid->baw_tail = 0;
2371 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302372 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302373 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302374 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302375 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302376 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302377 tid->state &= ~AGGR_ADDBA_COMPLETE;
2378 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302379 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380
Sujith8ee5afb2008-12-07 21:43:36 +05302381 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302382 acno < WME_NUM_AC; acno++, ac++) {
2383 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002384 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302385 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386 }
2387}
2388
Sujithb5aa9bf2008-10-29 10:13:31 +05302389void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390{
Felix Fietkau2b409942010-07-07 19:42:08 +02002391 struct ath_atx_ac *ac;
2392 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002394 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302395
Felix Fietkau2b409942010-07-07 19:42:08 +02002396 for (tidno = 0, tid = &an->tid[tidno];
2397 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398
Felix Fietkau2b409942010-07-07 19:42:08 +02002399 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002400 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Felix Fietkau2b409942010-07-07 19:42:08 +02002402 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403
Felix Fietkau2b409942010-07-07 19:42:08 +02002404 if (tid->sched) {
2405 list_del(&tid->list);
2406 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002408
2409 if (ac->sched) {
2410 list_del(&ac->list);
2411 tid->ac->sched = false;
2412 }
2413
2414 ath_tid_drain(sc, txq, tid);
2415 tid->state &= ~AGGR_ADDBA_COMPLETE;
2416 tid->state &= ~AGGR_CLEANUP;
2417
2418 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002419 }
2420}