/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

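/*
 * Drain the software queue of a TID: frames that have already been retried
 * are removed from the block-ack window and completed as failed, anything
 * else is sent out as a regular (non-aggregate) frame.
 */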
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

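/*
 * Mark the given sequence number as completed in the block-ack window and
 * slide the window start forward past any leading completed frames.
 */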
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

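/*
 * Mark the given sequence number as outstanding in the block-ack window,
 * extending the window tail if necessary.
 */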
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

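/*
 * Grab a free tx buffer and copy the descriptor and state of an existing
 * buffer into it, so that a stale holding descriptor can be re-submitted
 * for software retry. Returns NULL if no free buffers are available.
 */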
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

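/*
 * Walk an aggregate chain and count the total number of subframes as well
 * as the number that were not acknowledged in the block-ack bitmap.
 */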
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


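/*
 * Process the tx status of an aggregate: complete the subframes that were
 * acknowledged in the block-ack, software-retry the un-acked ones (up to
 * ATH_MAX_SW_RETRIES) and update the block-ack window accordingly.
 */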
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter = false;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

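/*
 * Returns true if any configured rate in the frame's rate series is a
 * legacy (non-MCS) rate.
 */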
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

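/*
 * Compute the maximum aggregate length for this TID, bounded by a 4ms
 * transmit duration at the lowest rate in the series and by the peer's
 * advertised A-MPDU limit.
 */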
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm)
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

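/*
 * Pull frames from the TID's software queue and chain them into an A-MPDU,
 * stopping at the block-ack window boundary, the aggregate length limit or
 * the subframe limit.
 */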
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type |= BUF_AMPDU;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

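/*
 * Keep forming aggregates for this TID and handing them to the hardware
 * queue until the block-ack window closes or the queue is deep enough.
 */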
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

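/*
 * Called when a station enters powersave: unschedule all of its TIDs and
 * report whether any of them still has frames buffered.
 */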
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

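/*
 * Called when a station wakes up: reschedule any TIDs that still have
 * buffered frames and mark the ACs so the PS filter is cleared on the
 * next transmission.
 */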
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

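/*
 * Complete (as failed) every buffer on the given list. Called with the txq
 * lock held; the lock is dropped around the completion calls.
 */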
Felix Fietkaufce041b2011-05-19 12:20:25 +02001210static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1211 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301212 __releases(txq->axq_lock)
1213 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301214{
1215 struct ath_buf *bf, *lastbf;
1216 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001217 struct ath_tx_status ts;
1218
1219 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301220 INIT_LIST_HEAD(&bf_head);
1221
Felix Fietkaufce041b2011-05-19 12:20:25 +02001222 while (!list_empty(list)) {
1223 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301224
Felix Fietkaufce041b2011-05-19 12:20:25 +02001225 if (bf->bf_stale) {
1226 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301227
Felix Fietkaufce041b2011-05-19 12:20:25 +02001228 ath_tx_return_buffer(sc, bf);
1229 continue;
Sujithe8324352009-01-16 21:38:42 +05301230 }
1231
1232 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001233 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001234
Sujithe8324352009-01-16 21:38:42 +05301235 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001236 if (bf_is_ampdu_not_probing(bf))
1237 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301238
Felix Fietkaufce041b2011-05-19 12:20:25 +02001239 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301240 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001241 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1242 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301243 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001244 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001245 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001246 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001247}
1248
1249/*
1250 * Drain a given TX queue (could be Beacon or Data)
1251 *
1252 * This assumes output has been stopped and
1253 * we do not need to block ath_tx_tasklet.
1254 */
1255void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1256{
1257 spin_lock_bh(&txq->axq_lock);
1258 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1259 int idx = txq->txq_tailidx;
1260
1261 while (!list_empty(&txq->txq_fifo[idx])) {
1262 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1263 retry_tx);
1264
1265 INCR(idx, ATH_TXFIFO_DEPTH);
1266 }
1267 txq->txq_tailidx = idx;
1268 }
1269
1270 txq->axq_link = NULL;
1271 txq->axq_tx_inprogress = false;
1272 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001273
1274 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001275 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1276 ath_txq_drain_pending_buffers(sc, txq);
1277
1278 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301279}
1280
Felix Fietkau080e1a22010-12-05 20:17:53 +01001281bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301282{
Sujithcbe61d82009-02-09 13:27:12 +05301283 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001284 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301285 struct ath_txq *txq;
1286 int i, npend = 0;
1287
1288 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001289 return true;
Sujith043a0402009-01-16 21:38:47 +05301290
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001291 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301292
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001293 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301294 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001295 if (!ATH_TXQ_SETUP(sc, i))
1296 continue;
1297
1298 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301299 }
1300
Felix Fietkau080e1a22010-12-05 20:17:53 +01001301 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001302 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301303
1304 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001305 if (!ATH_TXQ_SETUP(sc, i))
1306 continue;
1307
1308 /*
1309 * The caller will resume queues with ieee80211_wake_queues.
1310 * Mark the queue as not stopped to prevent ath_tx_complete
1311 * from waking the queue too early.
1312 */
1313 txq = &sc->tx.txq[i];
1314 txq->stopped = false;
1315 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301316 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001317
1318 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301319}
1320
Sujithe8324352009-01-16 21:38:42 +05301321void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1322{
1323 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1324 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1325}
1326
Ben Greear7755bad2011-01-18 17:30:00 -08001327/* For each axq_acq entry, for each tid, try to schedule packets
1328 * for transmission until axq_ampdu_depth has reached ATH_AGGR_MIN_QDEPTH.
1329 */
Sujithe8324352009-01-16 21:38:42 +05301330void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1331{
Ben Greear7755bad2011-01-18 17:30:00 -08001332 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1333 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301334
Felix Fietkau21f28e62011-01-15 14:30:14 +01001335 if (list_empty(&txq->axq_acq) ||
1336 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301337 return;
1338
1339 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001340 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301341
Ben Greear7755bad2011-01-18 17:30:00 -08001342 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1343 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1344 list_del(&ac->list);
1345 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301346
Ben Greear7755bad2011-01-18 17:30:00 -08001347 while (!list_empty(&ac->tid_q)) {
1348 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1349 list);
1350 list_del(&tid->list);
1351 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301352
Ben Greear7755bad2011-01-18 17:30:00 -08001353 if (tid->paused)
1354 continue;
Sujithe8324352009-01-16 21:38:42 +05301355
Ben Greear7755bad2011-01-18 17:30:00 -08001356 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301357
Ben Greear7755bad2011-01-18 17:30:00 -08001358 /*
1359 * add tid to round-robin queue if more frames
1360 * are pending for the tid
1361 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001362 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001363 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301364
Ben Greear7755bad2011-01-18 17:30:00 -08001365 if (tid == last_tid ||
1366 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1367 break;
Sujithe8324352009-01-16 21:38:42 +05301368 }
Ben Greear7755bad2011-01-18 17:30:00 -08001369
1370 if (!list_empty(&ac->tid_q)) {
1371 if (!ac->sched) {
1372 ac->sched = true;
1373 list_add_tail(&ac->list, &txq->axq_acq);
1374 }
1375 }
1376
1377 if (ac == last_ac ||
1378 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1379 return;
Sujithe8324352009-01-16 21:38:42 +05301380 }
1381}
1382
Sujithe8324352009-01-16 21:38:42 +05301383/***********/
1384/* TX, DMA */
1385/***********/
1386
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001387/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001388 * Insert a chain of ath_buf (descriptors) on a txq;
1389 * the descriptors must already be chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001390 */
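/*
 * On EDMA (AR93xx) hardware the chain is pushed into the TX FIFO slot at
 * txq_headidx; on legacy hardware it is linked to the previous chain via
 * axq_link (or written to TXDP when the queue is empty) and the queue is
 * then kicked with ath9k_hw_txstart().
 */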
Sujith102e0572008-10-29 10:15:16 +05301391static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001392 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001393{
Sujithcbe61d82009-02-09 13:27:12 +05301394 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001395 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001396 struct ath_buf *bf, *bf_last;
1397 bool puttxbuf = false;
1398 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301399
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001400 /*
1401 * Insert the frame on the outbound list and
1402 * pass it on to the hardware.
1403 */
1404
1405 if (list_empty(head))
1406 return;
1407
Felix Fietkaufce041b2011-05-19 12:20:25 +02001408 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001409 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001410 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001411
Joe Perches226afe62010-12-02 19:12:37 -08001412 ath_dbg(common, ATH_DBG_QUEUE,
1413 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001414
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1416 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001417 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001418 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001419 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001420 list_splice_tail_init(head, &txq->axq_q);
1421
Felix Fietkaufce041b2011-05-19 12:20:25 +02001422 if (txq->axq_link) {
1423 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001424 ath_dbg(common, ATH_DBG_XMIT,
1425 "link[%u] (%p)=%llx (%p)\n",
1426 txq->axq_qnum, txq->axq_link,
1427 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001428 } else if (!edma)
1429 puttxbuf = true;
1430
1431 txq->axq_link = bf_last->bf_desc;
1432 }
1433
1434 if (puttxbuf) {
1435 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1436 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1437 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1438 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1439 }
1440
1441 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001442 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001443 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001444 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001445
1446 if (!internal) {
1447 txq->axq_depth++;
1448 if (bf_is_ampdu_not_probing(bf))
1449 txq->axq_ampdu_depth++;
1450 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001451}
1452
Sujithe8324352009-01-16 21:38:42 +05301453static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001454 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301455{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001456 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001457 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001458 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301459
1460 /*
1461 * Do not queue to h/w when any of the following conditions is true:
1462 * - there are pending frames in software queue
1463 * - the TID is currently paused for ADDBA/BAR request
1464 * - seqno is not within block-ack window
1465 * - h/w queue depth exceeds low water mark
1466 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001467 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001468 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001469 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001470 /*
Sujithe8324352009-01-16 21:38:42 +05301471 * Add this frame to the software queue so it can be
1472 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001473 */
Ben Greearbda8add2011-01-09 23:11:48 -08001474 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001475 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001476 if (!txctl->an || !txctl->an->sleeping)
1477 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301478 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001479 }
1480
Felix Fietkau44f1d262011-08-28 00:32:25 +02001481 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1482 if (!bf)
1483 return;
1484
1485 bf->bf_state.bf_type |= BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001486 INIT_LIST_HEAD(&bf_head);
1487 list_add(&bf->list, &bf_head);
1488
Sujithe8324352009-01-16 21:38:42 +05301489 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001490 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301491
1492 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001493 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301494 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001495 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001496 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301497}
1498
Felix Fietkau82b873a2010-11-11 03:18:37 +01001499static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001500 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001502 struct ath_frame_info *fi = get_frame_info(skb);
1503 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301504 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505
Felix Fietkau44f1d262011-08-28 00:32:25 +02001506 bf = fi->bf;
1507 if (!bf)
1508 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1509
1510 if (!bf)
1511 return;
1512
1513 INIT_LIST_HEAD(&bf_head);
1514 list_add_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301515 bf->bf_state.bf_type &= ~BUF_AMPDU;
1516
1517 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001518 if (tid)
1519 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301520
Sujithd43f30152009-01-16 21:38:53 +05301521 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001522 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001523 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301524 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525}
1526
Sujith528f0c62008-10-29 10:14:26 +05301527static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001528{
Sujith528f0c62008-10-29 10:14:26 +05301529 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001530 enum ath9k_pkt_type htype;
1531 __le16 fc;
1532
Sujith528f0c62008-10-29 10:14:26 +05301533 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001534 fc = hdr->frame_control;
1535
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001536 if (ieee80211_is_beacon(fc))
1537 htype = ATH9K_PKT_TYPE_BEACON;
1538 else if (ieee80211_is_probe_resp(fc))
1539 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1540 else if (ieee80211_is_atim(fc))
1541 htype = ATH9K_PKT_TYPE_ATIM;
1542 else if (ieee80211_is_pspoll(fc))
1543 htype = ATH9K_PKT_TYPE_PSPOLL;
1544 else
1545 htype = ATH9K_PKT_TYPE_NORMAL;
1546
1547 return htype;
1548}
1549
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001550static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1551 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301552{
1553 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001554 struct ieee80211_sta *sta = tx_info->control.sta;
1555 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001556 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001557 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001558 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001559 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301560
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001561 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301562
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001563 if (sta)
1564 an = (struct ath_node *) sta->drv_priv;
1565
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001566 memset(fi, 0, sizeof(*fi));
1567 if (hw_key)
1568 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001569 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1570 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001571 else
1572 fi->keyix = ATH9K_TXKEYIX_INVALID;
1573 fi->keytype = keytype;
1574 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301575}
1576
Felix Fietkau82b873a2010-11-11 03:18:37 +01001577static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301578{
1579 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1580 int flags = 0;
1581
Sujith528f0c62008-10-29 10:14:26 +05301582 flags |= ATH9K_TXDESC_INTREQ;
1583
1584 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1585 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301586
Felix Fietkau82b873a2010-11-11 03:18:37 +01001587 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001588 flags |= ATH9K_TXDESC_LDPC;
1589
Sujith528f0c62008-10-29 10:14:26 +05301590 return flags;
1591}
1592
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001593/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594 * rix - rate index
1595 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1596 * width - 0 for 20 MHz, 1 for 40 MHz
1597 * half_gi - if set, use the 3.6 us (short GI) symbol time instead of 4 us
1598 */
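/*
 * Illustrative example (values only, not used by the code): a 1500-byte
 * MPDU at MCS 7, single stream, 20 MHz, long GI gives
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, nsymbols = 47, so the
 * data portion lasts 47 * 4 = 188 us; adding the 36 us of L-STF/L-LTF/
 * L-SIG/HT-SIG/HT-STF/HT-LTF training fields yields 224 us.
 */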
Felix Fietkau269c44b2010-11-14 15:20:06 +01001599static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301600 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001603 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301604
1605 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001606 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001608 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001609 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1610
1611 if (!half_gi)
1612 duration = SYMBOL_TIME(nsymbols);
1613 else
1614 duration = SYMBOL_TIME_HALFGI(nsymbols);
1615
Sujithe63835b2008-11-18 09:07:53 +05301616 /* add up the duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001617 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301618
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001619 return duration;
1620}
1621
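/*
 * On hardware with ATH9K_HW_CAP_APM, transmissions on a 5 GHz channel with
 * all three chains enabled (0x7) are reduced to two chains (0x3) for
 * legacy rates and HT rates below MCS 16 (rate code 0x90).
 */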
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301622u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1623{
1624 struct ath_hw *ah = sc->sc_ah;
1625 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301626 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1627 (curchan->channelFlags & CHANNEL_5GHZ) &&
1628 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301629 return 0x3;
1630 else
1631 return chainmask;
1632}
1633
Felix Fietkau269c44b2010-11-14 15:20:06 +01001634static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001635{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001636 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301638 struct sk_buff *skb;
1639 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301640 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001641 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301642 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301643 int i, flags = 0;
1644 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301645 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301646
1647 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301648
Sujitha22be222009-03-30 15:28:36 +05301649 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301650 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301651 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301652 hdr = (struct ieee80211_hdr *)skb->data;
1653 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301654
Sujithc89424d2009-01-30 14:29:28 +05301655 /*
1656 * We check if Short Preamble is needed for the CTS rate by
1657 * checking the BSS's global flag.
1658 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1659 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001660 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1661 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301662 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001663 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001664
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001665 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001666 bool is_40, is_sgi, is_sp;
1667 int phy;
1668
Sujithe63835b2008-11-18 09:07:53 +05301669 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001670 continue;
1671
Sujitha8efee42008-11-18 09:07:30 +05301672 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301673 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001674
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301675 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301676 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001677 flags |= ATH9K_TXDESC_RTSENA;
1678 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1679 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1680 flags |= ATH9K_TXDESC_CTSENA;
1681 }
1682
Sujithc89424d2009-01-30 14:29:28 +05301683 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1684 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1685 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1686 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001687
Felix Fietkau545750d2009-11-23 22:21:01 +01001688 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1689 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1690 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1691
1692 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1693 /* MCS rates */
1694 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301695 series[i].ChSel = ath_txchainmask_reduction(sc,
1696 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001697 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001698 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001699 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1700 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001701 continue;
1702 }
1703
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301704 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001705 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1706 !(rate->flags & IEEE80211_RATE_ERP_G))
1707 phy = WLAN_RC_PHY_CCK;
1708 else
1709 phy = WLAN_RC_PHY_OFDM;
1710
1711 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1712 series[i].Rate = rate->hw_value;
1713 if (rate->hw_value_short) {
1714 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1715 series[i].Rate |= rate->hw_value_short;
1716 } else {
1717 is_sp = false;
1718 }
1719
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301720 if (bf->bf_state.bfs_paprd)
1721 series[i].ChSel = common->tx_chainmask;
1722 else
1723 series[i].ChSel = ath_txchainmask_reduction(sc,
1724 common->tx_chainmask, series[i].Rate);
1725
Felix Fietkau545750d2009-11-23 22:21:01 +01001726 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001727 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001728 }
1729
Felix Fietkau27032052010-01-17 21:08:50 +01001730 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001731 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001732 flags &= ~ATH9K_TXDESC_RTSENA;
1733
1734 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1735 if (flags & ATH9K_TXDESC_RTSENA)
1736 flags &= ~ATH9K_TXDESC_CTSENA;
1737
Sujithe63835b2008-11-18 09:07:53 +05301738 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301739 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1740 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301741 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301742 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301743
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001744}
1745
Felix Fietkau44f1d262011-08-28 00:32:25 +02001746/*
1747 * Assign a descriptor (and a sequence number if necessary)
1748 * and map the buffer for DMA.  Frees the skb on error.
1749 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001750static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001751 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001752 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001753 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301754{
Felix Fietkau04caf862010-11-14 15:20:12 +01001755 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001756 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001757 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001758 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001759 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001760 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001761 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001762 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001763
1764 bf = ath_tx_get_buffer(sc);
1765 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001766 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001767 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001768 }
Sujithe8324352009-01-16 21:38:42 +05301769
Sujithe8324352009-01-16 21:38:42 +05301770 ATH_TXBUF_RESET(bf);
1771
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772 if (tid) {
1773 seqno = tid->seq_next;
1774 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1775 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1776 bf->bf_state.seqno = seqno;
1777 }
1778
Felix Fietkau82b873a2010-11-11 03:18:37 +01001779 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301780 bf->bf_mpdu = skb;
1781
Ben Greearc1739eb32010-10-14 12:45:29 -07001782 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1783 skb->len, DMA_TO_DEVICE);
1784 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301785 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001786 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001787 ath_err(ath9k_hw_common(sc->sc_ah),
1788 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001789 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001790 goto error;
Sujithe8324352009-01-16 21:38:42 +05301791 }
1792
Sujithe8324352009-01-16 21:38:42 +05301793 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301794
1795 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001796 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301797
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001798 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1799 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301800
1801 ath9k_hw_filltxdesc(ah, ds,
1802 skb->len, /* segment length */
1803 true, /* first segment */
1804 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001805 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001806 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001807 txq->axq_qnum);
1808
Felix Fietkau56dc6332011-08-28 00:32:22 +02001809 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001810
1811 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001812
1813error:
1814 dev_kfree_skb_any(skb);
1815 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001816}
1817
1818/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001819static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001820 struct ath_tx_control *txctl)
1821{
Felix Fietkau04caf862010-11-14 15:20:12 +01001822 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001824 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001825 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001826 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301827
Sujithe8324352009-01-16 21:38:42 +05301828 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301829 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1830 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001831 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1832 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001833 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001834
Felix Fietkau066dae92010-11-07 14:59:39 +01001835 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001836 }
1837
1838 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001839 /*
1840 * Try aggregation if it's a unicast data frame
1841 * and the destination is HT capable.
1842 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001843 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301844 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001845 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1846 if (!bf)
1847 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001848
Felix Fietkau82b873a2010-11-11 03:18:37 +01001849 bf->bf_state.bfs_paprd = txctl->paprd;
1850
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001851 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001852 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1853 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001854
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301855 if (txctl->paprd)
1856 bf->bf_state.bfs_paprd_timestamp = jiffies;
1857
Felix Fietkau55195412011-04-17 23:28:09 +02001858 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1859 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1860
Felix Fietkau44f1d262011-08-28 00:32:25 +02001861 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301862 }
1863
Felix Fietkaufa05f872011-08-28 00:32:24 +02001864out:
Sujithe8324352009-01-16 21:38:42 +05301865 spin_unlock_bh(&txctl->txq->axq_lock);
1866}
1867
1868/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001869int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301870 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001871{
Felix Fietkau28d16702010-11-14 15:20:10 +01001872 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1873 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001874 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001875 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001876 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001877 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001878 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001879 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001880 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001881
Ben Greeara9927ba2010-12-06 21:13:49 -08001882 /* NOTE: sta can be NULL according to net/mac80211.h */
1883 if (sta)
1884 txctl->an = (struct ath_node *)sta->drv_priv;
1885
Felix Fietkau04caf862010-11-14 15:20:12 +01001886 if (info->control.hw_key)
1887 frmlen += info->control.hw_key->icv_len;
1888
Felix Fietkau28d16702010-11-14 15:20:10 +01001889 /*
1890 * As a temporary workaround, assign seq# here; this will likely need
1891 * to be cleaned up to work better with Beacon transmission and virtual
1892 * BSSes.
1893 */
1894 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1895 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1896 sc->tx.seq_no += 0x10;
1897 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1898 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1899 }
1900
1901 /* Add the padding after the header if this is not already done */
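	/*
	 * padpos is the 802.11 header length as returned by ath9k_cmn_padpos();
	 * e.g. a 26-byte QoS data header gives padpos = 26 and padsize = 2, so
	 * two bytes of padding keep the frame body 4-byte aligned for DMA.
	 */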
1902 padpos = ath9k_cmn_padpos(hdr->frame_control);
1903 padsize = padpos & 3;
1904 if (padsize && skb->len > padpos) {
1905 if (skb_headroom(skb) < padsize)
1906 return -ENOMEM;
1907
1908 skb_push(skb, padsize);
1909 memmove(skb->data, skb->data + padsize, padpos);
1910 }
1911
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001912 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1913 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1914 !ieee80211_is_data(hdr->frame_control))
1915 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1916
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001917 setup_frame_info(hw, skb, frmlen);
1918
1919 /*
1920 * At this point, the vif, hw_key and sta pointers in the tx control
1921 * info are no longer valid (overwritten by the ath_frame_info data).
1922 */
1923
Felix Fietkau066dae92010-11-07 14:59:39 +01001924 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001925 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001926 if (txq == sc->tx.txq_map[q] &&
1927 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001928 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001929 txq->stopped = 1;
1930 }
1931 spin_unlock_bh(&txq->axq_lock);
1932
Felix Fietkau44f1d262011-08-28 00:32:25 +02001933 ath_tx_start_dma(sc, skb, txctl);
1934 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001935}
1936
Sujithe8324352009-01-16 21:38:42 +05301937/*****************/
1938/* TX Completion */
1939/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001940
Sujithe8324352009-01-16 21:38:42 +05301941static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301942 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001943{
Sujithe8324352009-01-16 21:38:42 +05301944 struct ieee80211_hw *hw = sc->hw;
1945 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001946 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001947 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001948 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301949
Joe Perches226afe62010-12-02 19:12:37 -08001950 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301951
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301952 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301953 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301954
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301955 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301956 /* Frame was ACKed */
1957 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1958 }
1959
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001960 padpos = ath9k_cmn_padpos(hdr->frame_control);
1961 padsize = padpos & 3;
1962 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301963 /*
1964 * Remove MAC header padding before giving the frame back to
1965 * mac80211.
1966 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001967 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301968 skb_pull(skb, padsize);
1969 }
1970
Sujith1b04b932010-01-08 10:36:05 +05301971 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1972 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001973 ath_dbg(common, ATH_DBG_PS,
1974 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301975 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1976 PS_WAIT_FOR_CAB |
1977 PS_WAIT_FOR_PSPOLL_DATA |
1978 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001979 }
1980
Felix Fietkau7545daf2011-01-24 19:23:16 +01001981 q = skb_get_queue_mapping(skb);
1982 if (txq == sc->tx.txq_map[q]) {
1983 spin_lock_bh(&txq->axq_lock);
1984 if (WARN_ON(--txq->pending_frames < 0))
1985 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001986
Felix Fietkau7545daf2011-01-24 19:23:16 +01001987 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1988 ieee80211_wake_queue(sc->hw, q);
1989 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001990 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001991 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001992 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001993
1994 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301995}
1996
1997static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001998 struct ath_txq *txq, struct list_head *bf_q,
1999 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05302000{
2001 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05302002 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302003 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302004
Sujithe8324352009-01-16 21:38:42 +05302005 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302006 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302007
2008 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302009 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302010
2011 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302012 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05302013 }
2014
Ben Greearc1739eb32010-10-14 12:45:29 -07002015 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002016 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002017
2018 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302019 if (time_after(jiffies,
2020 bf->bf_state.bfs_paprd_timestamp +
2021 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002022 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002023 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002024 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002025 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01002026 ath_debug_stat_tx(sc, bf, ts, txq);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302027 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002028 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002029 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
2030 * accidentally reference it later.
2031 */
2032 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302033
2034 /*
2035 * Return the list of ath_buf for this mpdu to the free queue
2036 */
2037 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2038 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2039 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2040}
2041
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002042static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2043 struct ath_tx_status *ts, int nframes, int nbad,
2044 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302045{
Sujitha22be222009-03-30 15:28:36 +05302046 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302047 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302048 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002049 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002050 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302051 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302052
Sujith95e4acb2009-03-13 08:56:09 +05302053 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002054 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302055
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002056 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302057 WARN_ON(tx_rateindex >= hw->max_rates);
2058
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002059 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302060 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002061 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002062 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302063
Felix Fietkaub572d032010-11-14 15:20:07 +01002064 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002065
Felix Fietkaub572d032010-11-14 15:20:07 +01002066 tx_info->status.ampdu_len = nframes;
2067 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002068 }
2069
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002070 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302071 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002072 /*
2073 * If an underrun error is seen, treat it as an excessive
2074 * retry, but only if the max frame trigger level has been reached
2075 * (2 KB for single stream, and 4 KB for dual stream).
2076 * Adjust the long retry count as if the frame was tried
2077 * hw->max_rate_tries times, to affect how rate control updates
2078 * PER for the failed rate.
2079 * When the bus is congested, penalizing this type of
2080 * underrun should help the hardware actually transmit new frames
2081 * successfully by eventually preferring slower rates.
2082 * This in turn should also alleviate congestion on the bus.
2083 */
2084 if (ieee80211_is_data(hdr->frame_control) &&
2085 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2086 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002087 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002088 tx_info->status.rates[tx_rateindex].count =
2089 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302090 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302091
Felix Fietkau545750d2009-11-23 22:21:01 +01002092 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302093 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002094 tx_info->status.rates[i].idx = -1;
2095 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302096
Felix Fietkau78c46532010-06-25 01:26:16 +02002097 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302098}
2099
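/*
 * Common completion path for both legacy and EDMA TX: update the queue
 * depth, fill in rate-control feedback and complete either the single
 * frame or the whole aggregate, then give the queue a chance to schedule
 * more aggregates.
 */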
Felix Fietkaufce041b2011-05-19 12:20:25 +02002100static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2101 struct ath_tx_status *ts, struct ath_buf *bf,
2102 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302103 __releases(txq->axq_lock)
2104 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002105{
2106 int txok;
2107
2108 txq->axq_depth--;
2109 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2110 txq->axq_tx_inprogress = false;
2111 if (bf_is_ampdu_not_probing(bf))
2112 txq->axq_ampdu_depth--;
2113
2114 spin_unlock_bh(&txq->axq_lock);
2115
2116 if (!bf_isampdu(bf)) {
2117 /*
2118 * This frame is sent out as a single frame.
2119 * Use hardware retry status for this frame.
2120 */
2121 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2122 bf->bf_state.bf_type |= BUF_XRETRY;
2123 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2124 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2125 } else
2126 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2127
2128 spin_lock_bh(&txq->axq_lock);
2129
2130 if (sc->sc_flags & SC_OP_TXAGGR)
2131 ath_txq_schedule(sc, txq);
2132}
2133
Sujithc4288392008-11-18 09:09:30 +05302134static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135{
Sujithcbe61d82009-02-09 13:27:12 +05302136 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002137 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2139 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302140 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002141 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142 int status;
2143
Joe Perches226afe62010-12-02 19:12:37 -08002144 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2145 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2146 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147
Felix Fietkaufce041b2011-05-19 12:20:25 +02002148 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150 if (list_empty(&txq->axq_q)) {
2151 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002152 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002153 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154 break;
2155 }
2156 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2157
2158 /*
2159 * There is a race condition in which a BH gets scheduled
2160 * after sw writes TxE and before hw re-loads the last
2161 * descriptor to get the newly chained one.
2162 * Software must keep the last DONE descriptor as a
2163 * holding descriptor - software does so by marking
2164 * it with the STALE flag.
2165 */
2166 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302167 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002169 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002171
2172 bf = list_entry(bf_held->list.next, struct ath_buf,
2173 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 }
2175
2176 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302177 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178
Felix Fietkau29bffa92010-03-29 20:14:23 -07002179 memset(&ts, 0, sizeof(ts));
2180 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002181 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002183
Ben Greear2dac4fb2011-01-09 23:11:45 -08002184 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185
2186 /*
2187 * Remove the ath_bufs of the same transmit unit from txq,
2188 * but leave the last descriptor behind as the holding
2189 * descriptor for hw.
2190 */
Sujitha119cc42009-03-30 15:28:38 +05302191 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002193 if (!list_is_singular(&lastbf->list))
2194 list_cut_position(&bf_head,
2195 &txq->axq_q, lastbf->list.prev);
2196
Felix Fietkaufce041b2011-05-19 12:20:25 +02002197 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002198 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002199 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200 }
Johannes Berge6a98542008-10-21 12:40:02 +02002201
Felix Fietkaufce041b2011-05-19 12:20:25 +02002202 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002203 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002204 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002205}
2206
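/*
 * TX watchdog: if a queue still has pending frames and no descriptor has
 * completed since the previous poll (axq_tx_inprogress already set), the
 * hardware is assumed to be hung and the chip is reset.
 */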
Sujith305fe472009-07-23 15:32:29 +05302207static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002208{
2209 struct ath_softc *sc = container_of(work, struct ath_softc,
2210 tx_complete_work.work);
2211 struct ath_txq *txq;
2212 int i;
2213 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002214#ifdef CONFIG_ATH9K_DEBUGFS
2215 sc->tx_complete_poll_work_seen++;
2216#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002217
2218 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2219 if (ATH_TXQ_SETUP(sc, i)) {
2220 txq = &sc->tx.txq[i];
2221 spin_lock_bh(&txq->axq_lock);
2222 if (txq->axq_depth) {
2223 if (txq->axq_tx_inprogress) {
2224 needreset = true;
2225 spin_unlock_bh(&txq->axq_lock);
2226 break;
2227 } else {
2228 txq->axq_tx_inprogress = true;
2229 }
2230 }
2231 spin_unlock_bh(&txq->axq_lock);
2232 }
2233
2234 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002235 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2236 "tx hung, resetting the chip\n");
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302237 spin_lock_bh(&sc->sc_pcu_lock);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002238 ath_reset(sc, true);
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302239 spin_unlock_bh(&sc->sc_pcu_lock);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002240 }
2241
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002242 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002243 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2244}
2245
2246
Sujithe8324352009-01-16 21:38:42 +05302247
2248void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002249{
Sujithe8324352009-01-16 21:38:42 +05302250 int i;
2251 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002252
Sujithe8324352009-01-16 21:38:42 +05302253 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002254
2255 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302256 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2257 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002258 }
2259}
2260
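/*
 * EDMA (AR93xx) completion handler: TX status is read from the dedicated
 * status ring instead of the frame descriptors, and ts.qid identifies
 * which hardware queue the completed frame belongs to.
 */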
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261void ath_tx_edma_tasklet(struct ath_softc *sc)
2262{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2265 struct ath_hw *ah = sc->sc_ah;
2266 struct ath_txq *txq;
2267 struct ath_buf *bf, *lastbf;
2268 struct list_head bf_head;
2269 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002270
2271 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002272 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002273 if (status == -EINPROGRESS)
2274 break;
2275 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002276 ath_dbg(common, ATH_DBG_XMIT,
2277 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002278 break;
2279 }
2280
2281 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002282 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002283 continue;
2284
Felix Fietkaufce041b2011-05-19 12:20:25 +02002285 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002286
2287 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002288
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002289 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2290 spin_unlock_bh(&txq->axq_lock);
2291 return;
2292 }
2293
2294 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2295 struct ath_buf, list);
2296 lastbf = bf->bf_lastbf;
2297
2298 INIT_LIST_HEAD(&bf_head);
2299 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2300 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002301
Felix Fietkaufce041b2011-05-19 12:20:25 +02002302 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2303 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002304
Felix Fietkaufce041b2011-05-19 12:20:25 +02002305 if (!list_empty(&txq->axq_q)) {
2306 struct list_head bf_q;
2307
2308 INIT_LIST_HEAD(&bf_q);
2309 txq->axq_link = NULL;
2310 list_splice_tail_init(&txq->axq_q, &bf_q);
2311 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2312 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002313 }
2314
Felix Fietkaufce041b2011-05-19 12:20:25 +02002315 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002316 spin_unlock_bh(&txq->axq_lock);
2317 }
2318}
2319
Sujithe8324352009-01-16 21:38:42 +05302320/*****************/
2321/* Init, Cleanup */
2322/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002323
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002324static int ath_txstatus_setup(struct ath_softc *sc, int size)
2325{
2326 struct ath_descdma *dd = &sc->txsdma;
2327 u8 txs_len = sc->sc_ah->caps.txs_len;
2328
2329 dd->dd_desc_len = size * txs_len;
2330 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2331 &dd->dd_desc_paddr, GFP_KERNEL);
2332 if (!dd->dd_desc)
2333 return -ENOMEM;
2334
2335 return 0;
2336}
2337
2338static int ath_tx_edma_init(struct ath_softc *sc)
2339{
2340 int err;
2341
2342 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2343 if (!err)
2344 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2345 sc->txsdma.dd_desc_paddr,
2346 ATH_TXSTATUS_RING_SIZE);
2347
2348 return err;
2349}
2350
2351static void ath_tx_edma_cleanup(struct ath_softc *sc)
2352{
2353 struct ath_descdma *dd = &sc->txsdma;
2354
2355 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2356 dd->dd_desc_paddr);
2357}
2358
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359int ath_tx_init(struct ath_softc *sc, int nbufs)
2360{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002361 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362 int error = 0;
2363
Sujith797fe5cb2009-03-30 15:28:45 +05302364 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365
Sujith797fe5cb2009-03-30 15:28:45 +05302366 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002367 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302368 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002369 ath_err(common,
2370 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302371 goto err;
2372 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373
Sujith797fe5cb2009-03-30 15:28:45 +05302374 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002375 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302376 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002377 ath_err(common,
2378 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302379 goto err;
2380 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002382 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2383
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002384 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2385 error = ath_tx_edma_init(sc);
2386 if (error)
2387 goto err;
2388 }
2389
Sujith797fe5cb2009-03-30 15:28:45 +05302390err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391 if (error != 0)
2392 ath_tx_cleanup(sc);
2393
2394 return error;
2395}
2396
Sujith797fe5cb2009-03-30 15:28:45 +05302397void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398{
Sujithb77f4832008-12-07 21:44:03 +05302399 if (sc->beacon.bdma.dd_desc_len != 0)
2400 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Sujithb77f4832008-12-07 21:44:03 +05302402 if (sc->tx.txdma.dd_desc_len != 0)
2403 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002404
2405 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2406 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407}
2408
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2410{
Sujithc5170162008-10-29 10:13:59 +05302411 struct ath_atx_tid *tid;
2412 struct ath_atx_ac *ac;
2413 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002414
Sujith8ee5afb2008-12-07 21:43:36 +05302415 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302416 tidno < WME_NUM_TID;
2417 tidno++, tid++) {
2418 tid->an = an;
2419 tid->tidno = tidno;
2420 tid->seq_start = tid->seq_next = 0;
2421 tid->baw_size = WME_MAX_BA;
2422 tid->baw_head = tid->baw_tail = 0;
2423 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302424 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302425 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002426 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302427 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302428 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302429 tid->state &= ~AGGR_ADDBA_COMPLETE;
2430 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302431 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432
Sujith8ee5afb2008-12-07 21:43:36 +05302433 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302434 acno < WME_NUM_AC; acno++, ac++) {
2435 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002436 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302437 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438 }
2439}
2440
Sujithb5aa9bf2008-10-29 10:13:31 +05302441void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002442{
Felix Fietkau2b409942010-07-07 19:42:08 +02002443 struct ath_atx_ac *ac;
2444 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002445 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002446 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302447
Felix Fietkau2b409942010-07-07 19:42:08 +02002448 for (tidno = 0, tid = &an->tid[tidno];
2449 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002450
Felix Fietkau2b409942010-07-07 19:42:08 +02002451 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002452 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002453
Felix Fietkau2b409942010-07-07 19:42:08 +02002454 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002455
Felix Fietkau2b409942010-07-07 19:42:08 +02002456 if (tid->sched) {
2457 list_del(&tid->list);
2458 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002459 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002460
2461 if (ac->sched) {
2462 list_del(&ac->list);
2463 tid->ac->sched = false;
2464 }
2465
2466 ath_tid_drain(sc, txq, tid);
2467 tid->state &= ~AGGR_ADDBA_COMPLETE;
2468 tid->state &= ~AGGR_CLEANUP;
2469
2470 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002471 }
2472}