/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{   26,   54 },		/* 0: BPSK */
	{   52,  108 },		/* 1: QPSK 1/2 */
	{   78,  162 },		/* 2: QPSK 3/4 */
	{  104,  216 },		/* 3: 16-QAM 1/2 */
	{  156,  324 },		/* 4: 16-QAM 3/4 */
	{  208,  432 },		/* 5: 64-QAM 2/3 */
	{  234,  486 },		/* 6: 64-QAM 3/4 */
	{  260,  540 },		/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

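/*
 * Maximum frame length (in bytes) that still fits into a 4 ms transmit
 * duration, indexed as [HT mode][MCS index]. Entries are capped at 65532;
 * ath_lookup_rate() uses this table to bound the aggregate size.
 */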
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

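/*
 * Per-frame driver state is kept in the rate_driver_data area of the
 * mac80211 tx info; the BUILD_BUG_ON below guarantees that it fits.
 */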
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

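/*
 * Flush all frames still queued on a TID: software-retried frames are
 * removed from the block-ack window and completed as failed, everything
 * else is sent out as a normal (non-aggregate) frame.
 */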
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

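/*
 * Remove a completed sequence number from the block-ack window and slide
 * the window start forward past any other already-completed slots.
 */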
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

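/* Mark a sequence number as pending inside the TID's block-ack window. */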
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

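/*
 * Clone a tx buffer for software retry when the original is still in use
 * as a holding descriptor; the clone shares the skb, DMA address and
 * descriptor contents of the original.
 */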
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

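/*
 * Walk an aggregate and count the total number of subframes as well as
 * those that were not acknowledged in the block-ack bitmap.
 */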
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

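/*
 * Process the tx status of an aggregate: complete subframes covered by the
 * block-ack, software-retry the rest (or fail them if the TID is being
 * cleaned up or the retry limit was hit), and requeue pending subframes at
 * the head of the TID queue.
 */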
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * The AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

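/* Check whether the frame's rate series contains a legacy (non-MCS) rate. */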
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if it is a probe rate, avoid aggregating this
	 * packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
				!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

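/*
 * Program the tx descriptors of all buffers in a frame/aggregate: rate
 * series, clrdmask, aggregate first/middle/last markers and the link to
 * the next descriptor in the chain.
 */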
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;

	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
	bool clrdmask = !!(tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT);

	u32 ds_next;

	ath_buf_set_rate(sc, bf, len);

	while (bf) {
		if (bf->bf_next)
			ds_next = bf->bf_next->bf_daddr;
		else
			ds_next = 0;

		ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, clrdmask);
		if (!aggr)
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
		else if (!bf->bf_next)
			ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_desc);
		else {
			if (bf == bf_first)
				ath9k_hw_set11n_aggr_first(sc->sc_ah,
							   bf->bf_desc, len);

			ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc,
						    bf->bf_state.ndelim);
		}

		ath9k_hw_set_desc_link(ah, bf->bf_desc, ds_next);
		bf = bf->bf_next;
	}
}

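/*
 * Form aggregates from the TID's queue and hand them to the hardware queue
 * until the queue is sufficiently filled or the block-ack window closes.
 */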
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

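/*
 * Complete every buffer on the given list with a failed status, returning
 * stale (holding) buffers to the free list; the txq lock is dropped around
 * the completion calls.
 */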
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

Felix Fietkau080e1a22010-12-05 20:17:53 +01001304bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301305{
Sujithcbe61d82009-02-09 13:27:12 +05301306 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001307 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301308 struct ath_txq *txq;
1309 int i, npend = 0;
1310
1311 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001312 return true;
Sujith043a0402009-01-16 21:38:47 +05301313
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001314 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301315
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001316 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301317 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001318 if (!ATH_TXQ_SETUP(sc, i))
1319 continue;
1320
1321 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301322 }
1323
Felix Fietkau080e1a22010-12-05 20:17:53 +01001324 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001325 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301326
1327 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001328 if (!ATH_TXQ_SETUP(sc, i))
1329 continue;
1330
1331 /*
1332 * The caller will resume queues with ieee80211_wake_queues.
1333 * Mark the queue as not stopped to prevent ath_tx_complete
1334 * from waking the queue too early.
1335 */
1336 txq = &sc->tx.txq[i];
1337 txq->stopped = false;
1338 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301339 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001340
1341 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301342}
1343
Sujithe8324352009-01-16 21:38:42 +05301344void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1345{
1346 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1347 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1348}
1349
Ben Greear7755bad2011-01-18 17:30:00 -08001350/* For each axq_acq entry, for each tid, try to schedule packets
1351 * for transmit until ampdu_depth has reached min Q depth.
1352 */
Sujithe8324352009-01-16 21:38:42 +05301353void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1354{
Ben Greear7755bad2011-01-18 17:30:00 -08001355 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1356 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301357
Felix Fietkau236de512011-09-03 01:40:25 +02001358 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001359 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301360 return;
1361
1362 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001363 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301364
Ben Greear7755bad2011-01-18 17:30:00 -08001365 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1366 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1367 list_del(&ac->list);
1368 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301369
Ben Greear7755bad2011-01-18 17:30:00 -08001370 while (!list_empty(&ac->tid_q)) {
1371 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1372 list);
1373 list_del(&tid->list);
1374 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301375
Ben Greear7755bad2011-01-18 17:30:00 -08001376 if (tid->paused)
1377 continue;
Sujithe8324352009-01-16 21:38:42 +05301378
Ben Greear7755bad2011-01-18 17:30:00 -08001379 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301380
Ben Greear7755bad2011-01-18 17:30:00 -08001381 /*
1382 * add tid to round-robin queue if more frames
1383 * are pending for the tid
1384 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001385 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001386 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301387
Ben Greear7755bad2011-01-18 17:30:00 -08001388 if (tid == last_tid ||
1389 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1390 break;
Sujithe8324352009-01-16 21:38:42 +05301391 }
Ben Greear7755bad2011-01-18 17:30:00 -08001392
1393 if (!list_empty(&ac->tid_q)) {
1394 if (!ac->sched) {
1395 ac->sched = true;
1396 list_add_tail(&ac->list, &txq->axq_acq);
1397 }
1398 }
1399
1400 if (ac == last_ac ||
1401 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1402 return;
Sujithe8324352009-01-16 21:38:42 +05301403 }
1404}
1405
Sujithe8324352009-01-16 21:38:42 +05301406/***********/
1407/* TX, DMA */
1408/***********/
1409
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001410/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001411 * Insert a chain of ath_buf (descriptors) on a txq; assumes the
1412 * descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001413 */
Sujith102e0572008-10-29 10:15:16 +05301414static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001416{
Sujithcbe61d82009-02-09 13:27:12 +05301417 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001418 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001419 struct ath_buf *bf, *bf_last;
1420 bool puttxbuf = false;
1421 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301422
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001423 /*
1424 * Insert the frame on the outbound list and
1425 * pass it on to the hardware.
1426 */
1427
1428 if (list_empty(head))
1429 return;
1430
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001432 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001433 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001434
Joe Perches226afe62010-12-02 19:12:37 -08001435 ath_dbg(common, ATH_DBG_QUEUE,
1436 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001437
Felix Fietkaufce041b2011-05-19 12:20:25 +02001438 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1439 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001440 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001441 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001442 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001443 list_splice_tail_init(head, &txq->axq_q);
1444
Felix Fietkaufce041b2011-05-19 12:20:25 +02001445 if (txq->axq_link) {
1446 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001447 ath_dbg(common, ATH_DBG_XMIT,
1448 "link[%u] (%p)=%llx (%p)\n",
1449 txq->axq_qnum, txq->axq_link,
1450 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001451 } else if (!edma)
1452 puttxbuf = true;
1453
1454 txq->axq_link = bf_last->bf_desc;
1455 }
1456
1457 if (puttxbuf) {
1458 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1459 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1460 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1461 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1462 }
1463
1464 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001465 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001466 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001467 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001468
1469 if (!internal) {
1470 txq->axq_depth++;
1471 if (bf_is_ampdu_not_probing(bf))
1472 txq->axq_ampdu_depth++;
1473 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001474}
1475
Sujithe8324352009-01-16 21:38:42 +05301476static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001477 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301478{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001479 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001480 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001481 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301482
1483 /*
1484 * Do not queue to h/w when any of the following conditions is true:
1485 * - there are pending frames in software queue
1486 * - the TID is currently paused for ADDBA/BAR request
1487 * - seqno is not within block-ack window
1488 * - h/w queue depth exceeds low water mark
1489 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001490 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001491 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001492 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001493 /*
Sujithe8324352009-01-16 21:38:42 +05301494 * Add this frame to the software queue so that it can be
1495 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001496 */
Ben Greearbda8add2011-01-09 23:11:48 -08001497 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001498 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001499 if (!txctl->an || !txctl->an->sleeping)
1500 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301501 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001502 }
1503
Felix Fietkau44f1d262011-08-28 00:32:25 +02001504 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1505 if (!bf)
1506 return;
1507
Felix Fietkau399c6482011-09-14 21:24:17 +02001508 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001509 INIT_LIST_HEAD(&bf_head);
1510 list_add(&bf->list, &bf_head);
1511
Sujithe8324352009-01-16 21:38:42 +05301512 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001513 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301514
1515 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001516 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301517 bf->bf_lastbf = bf;
Felix Fietkau399c6482011-09-14 21:24:17 +02001518 ath_tx_fill_desc(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001519 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301520}
1521
Felix Fietkau82b873a2010-11-11 03:18:37 +01001522static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001523 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001524{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001525 struct ath_frame_info *fi = get_frame_info(skb);
1526 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301527 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001528
Felix Fietkau44f1d262011-08-28 00:32:25 +02001529 bf = fi->bf;
1530 if (!bf)
1531 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1532
1533 if (!bf)
1534 return;
1535
1536 INIT_LIST_HEAD(&bf_head);
1537 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001538 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301539
1540 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001541 if (tid)
1542 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301543
Sujithd43f30152009-01-16 21:38:53 +05301544 bf->bf_lastbf = bf;
Felix Fietkau399c6482011-09-14 21:24:17 +02001545 ath_tx_fill_desc(sc, bf, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001546 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301547 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001548}
1549
Sujith528f0c62008-10-29 10:14:26 +05301550static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001551{
Sujith528f0c62008-10-29 10:14:26 +05301552 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001553 enum ath9k_pkt_type htype;
1554 __le16 fc;
1555
Sujith528f0c62008-10-29 10:14:26 +05301556 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001557 fc = hdr->frame_control;
1558
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001559 if (ieee80211_is_beacon(fc))
1560 htype = ATH9K_PKT_TYPE_BEACON;
1561 else if (ieee80211_is_probe_resp(fc))
1562 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1563 else if (ieee80211_is_atim(fc))
1564 htype = ATH9K_PKT_TYPE_ATIM;
1565 else if (ieee80211_is_pspoll(fc))
1566 htype = ATH9K_PKT_TYPE_PSPOLL;
1567 else
1568 htype = ATH9K_PKT_TYPE_NORMAL;
1569
1570 return htype;
1571}
1572
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001573static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1574 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301575{
1576 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001577 struct ieee80211_sta *sta = tx_info->control.sta;
1578 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001579 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001580 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001581 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001582 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301583
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001584 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301585
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001586 if (sta)
1587 an = (struct ath_node *) sta->drv_priv;
1588
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001589 memset(fi, 0, sizeof(*fi));
1590 if (hw_key)
1591 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001592 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1593 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001594 else
1595 fi->keyix = ATH9K_TXKEYIX_INVALID;
1596 fi->keytype = keytype;
1597 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301598}
1599
Felix Fietkau82b873a2010-11-11 03:18:37 +01001600static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301601{
1602 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1603 int flags = 0;
1604
Sujith528f0c62008-10-29 10:14:26 +05301605 flags |= ATH9K_TXDESC_INTREQ;
1606
1607 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1608 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301609
Felix Fietkau82b873a2010-11-11 03:18:37 +01001610 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001611 flags |= ATH9K_TXDESC_LDPC;
1612
Sujith528f0c62008-10-29 10:14:26 +05301613 return flags;
1614}
1615
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001616/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001617 * rix - rate index
1618 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1619 * width - 0 for 20 MHz, 1 for 40 MHz
1620 * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
1621 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001622static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301623 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001624{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001625 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001626 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301627
1628 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001629 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001630 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001631 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001632 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1633
1634 if (!half_gi)
1635 duration = SYMBOL_TIME(nsymbols);
1636 else
1637 duration = SYMBOL_TIME_HALFGI(nsymbols);
1638
Sujithe63835b2008-11-18 09:07:53 +05301639 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301641
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001642 return duration;
1643}
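
/*
 * Worked example for ath_pkt_duration() (illustrative values, not taken
 * from a real frame): rix = 0 (MCS0, one stream), pktlen = 1500,
 * width = 0 (20 MHz), half_gi = 0:
 *
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS        = 12022
 *   nsymbits = bits_per_symbol[0][0] * 1 stream = 26
 *   nsymbols = ceil(12022 / 26)                 = 463
 *   duration = SYMBOL_TIME(463)                 = 1852 us
 *
 * plus L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us of
 * training/signal fields, giving 1888 us in total.
 */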
1644
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301645u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1646{
1647 struct ath_hw *ah = sc->sc_ah;
1648 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301649 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1650 (curchan->channelFlags & CHANNEL_5GHZ) &&
1651 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301652 return 0x3;
1653 else
1654 return chainmask;
1655}
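
/*
 * Usage sketch (the 6 Mbit/s OFDM rate code 0x0b is an assumption based
 * on the ath9k legacy rate tables): on a 5 GHz channel with the APM
 * capability set, ath_txchainmask_reduction(sc, 0x7, 0x0b) reduces the
 * three-chain mask 0x7 to the two-chain mask 0x3; in every other case
 * the chainmask passed in is returned unchanged.
 */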
1656
Felix Fietkau269c44b2010-11-14 15:20:06 +01001657static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001658{
Felix Fietkau82b2d332011-09-03 01:40:23 +02001659 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001660 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301661 struct sk_buff *skb;
1662 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301663 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001664 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301665 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301666 int i, flags = 0;
1667 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301668 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301669
1670 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301671
Sujitha22be222009-03-30 15:28:36 +05301672 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301673 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301674 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301675 hdr = (struct ieee80211_hdr *)skb->data;
1676 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301677
Sujithc89424d2009-01-30 14:29:28 +05301678 /*
1679 * Whether a Short Preamble is needed for the CTS rate is determined
1680 * by the BSS's global flag.
1681 * For the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used instead.
1682 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001683 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1684 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301685 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001686 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001687
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001689 bool is_40, is_sgi, is_sp;
1690 int phy;
1691
Sujithe63835b2008-11-18 09:07:53 +05301692 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693 continue;
1694
Sujitha8efee42008-11-18 09:07:30 +05301695 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301696 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001697
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301698 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301699 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001700 flags |= ATH9K_TXDESC_RTSENA;
1701 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1702 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1703 flags |= ATH9K_TXDESC_CTSENA;
1704 }
1705
Sujithc89424d2009-01-30 14:29:28 +05301706 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1707 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1708 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1709 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001710
Felix Fietkau545750d2009-11-23 22:21:01 +01001711 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1712 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1713 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1714
1715 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1716 /* MCS rates */
1717 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301718 series[i].ChSel = ath_txchainmask_reduction(sc,
Felix Fietkau82b2d332011-09-03 01:40:23 +02001719 ah->txchainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001720 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001721 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001722 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1723 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001724 continue;
1725 }
1726
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301727 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001728 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1729 !(rate->flags & IEEE80211_RATE_ERP_G))
1730 phy = WLAN_RC_PHY_CCK;
1731 else
1732 phy = WLAN_RC_PHY_OFDM;
1733
1734 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1735 series[i].Rate = rate->hw_value;
1736 if (rate->hw_value_short) {
1737 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1738 series[i].Rate |= rate->hw_value_short;
1739 } else {
1740 is_sp = false;
1741 }
1742
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301743 if (bf->bf_state.bfs_paprd)
Felix Fietkau82b2d332011-09-03 01:40:23 +02001744 series[i].ChSel = ah->txchainmask;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301745 else
1746 series[i].ChSel = ath_txchainmask_reduction(sc,
Felix Fietkau82b2d332011-09-03 01:40:23 +02001747 ah->txchainmask, series[i].Rate);
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301748
Felix Fietkau545750d2009-11-23 22:21:01 +01001749 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001750 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001751 }
1752
Felix Fietkau27032052010-01-17 21:08:50 +01001753 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001754 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001755 flags &= ~ATH9K_TXDESC_RTSENA;
1756
1757 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1758 if (flags & ATH9K_TXDESC_RTSENA)
1759 flags &= ~ATH9K_TXDESC_CTSENA;
1760
Sujithe63835b2008-11-18 09:07:53 +05301761 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301762 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1763 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301764 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301765 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301766
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001767}
1768
Felix Fietkau44f1d262011-08-28 00:32:25 +02001769/*
1770 * Assign a descriptor (and a sequence number if necessary)
1771 * and map the buffer for DMA. Frees the skb on error.
1772 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001773static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001774 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001775 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001776 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301777{
Felix Fietkau04caf862010-11-14 15:20:12 +01001778 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001779 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001780 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001781 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001782 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001783 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001784 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001785 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001786
1787 bf = ath_tx_get_buffer(sc);
1788 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001789 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001790 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001791 }
Sujithe8324352009-01-16 21:38:42 +05301792
Sujithe8324352009-01-16 21:38:42 +05301793 ATH_TXBUF_RESET(bf);
1794
Felix Fietkaufa05f872011-08-28 00:32:24 +02001795 if (tid) {
1796 seqno = tid->seq_next;
1797 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1798 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1799 bf->bf_state.seqno = seqno;
1800 }
1801
Felix Fietkau82b873a2010-11-11 03:18:37 +01001802 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301803 bf->bf_mpdu = skb;
1804
Ben Greearc1739eb2010-10-14 12:45:29 -07001805 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1806 skb->len, DMA_TO_DEVICE);
1807 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301808 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001809 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001810 ath_err(ath9k_hw_common(sc->sc_ah),
1811 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001812 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001813 goto error;
Sujithe8324352009-01-16 21:38:42 +05301814 }
1815
Sujithe8324352009-01-16 21:38:42 +05301816 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301817
1818 ds = bf->bf_desc;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001819 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1820 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301821
1822 ath9k_hw_filltxdesc(ah, ds,
1823 skb->len, /* segment length */
1824 true, /* first segment */
1825 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001826 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001827 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001828 txq->axq_qnum);
1829
Felix Fietkau56dc6332011-08-28 00:32:22 +02001830 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001831
1832 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001833
1834error:
1835 dev_kfree_skb_any(skb);
1836 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001837}
1838
1839/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001840static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001841 struct ath_tx_control *txctl)
1842{
Felix Fietkau04caf862010-11-14 15:20:12 +01001843 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1844 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001845 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001846 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001847 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301848
Sujithe8324352009-01-16 21:38:42 +05301849 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301850 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1851 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001852 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1853 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001854 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001855
Felix Fietkau066dae92010-11-07 14:59:39 +01001856 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001857 }
1858
1859 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001860 /*
1861 * Try aggregation if it's a unicast data frame
1862 * and the destination is HT capable.
1863 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001864 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301865 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001866 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1867 if (!bf)
1868 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001869
Felix Fietkau82b873a2010-11-11 03:18:37 +01001870 bf->bf_state.bfs_paprd = txctl->paprd;
1871
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001872 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001873 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1874 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001875
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301876 if (txctl->paprd)
1877 bf->bf_state.bfs_paprd_timestamp = jiffies;
1878
Felix Fietkau44f1d262011-08-28 00:32:25 +02001879 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301880 }
1881
Felix Fietkaufa05f872011-08-28 00:32:24 +02001882out:
Sujithe8324352009-01-16 21:38:42 +05301883 spin_unlock_bh(&txctl->txq->axq_lock);
1884}
1885
1886/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001887int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301888 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001889{
Felix Fietkau28d16702010-11-14 15:20:10 +01001890 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1891 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001892 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001893 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001894 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001895 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001896 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001897 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001898 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001899
Ben Greeara9927ba2010-12-06 21:13:49 -08001900 /* NOTE: sta can be NULL according to net/mac80211.h */
1901 if (sta)
1902 txctl->an = (struct ath_node *)sta->drv_priv;
1903
Felix Fietkau04caf862010-11-14 15:20:12 +01001904 if (info->control.hw_key)
1905 frmlen += info->control.hw_key->icv_len;
1906
Felix Fietkau28d16702010-11-14 15:20:10 +01001907 /*
1908 * As a temporary workaround, assign seq# here; this will likely need
1909 * to be cleaned up to work better with Beacon transmission and virtual
1910 * BSSes.
1911 */
1912 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1913 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1914 sc->tx.seq_no += 0x10;
1915 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1916 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1917 }
1918
1919 /* Add the padding after the header if this is not already done */
1920 padpos = ath9k_cmn_padpos(hdr->frame_control);
1921 padsize = padpos & 3;
1922 if (padsize && skb->len > padpos) {
1923 if (skb_headroom(skb) < padsize)
1924 return -ENOMEM;
1925
1926 skb_push(skb, padsize);
1927 memmove(skb->data, skb->data + padsize, padpos);
1928 }
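	/*
	 * Example (header length assumed, not taken from a trace): for a
	 * three-address QoS data frame ath9k_cmn_padpos() returns 26, so
	 * padsize = 26 & 3 = 2; two bytes are pushed in front of the skb
	 * and the header is moved up, leaving the pad between the header
	 * and the payload so that the payload starts 4-byte aligned.
	 */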
1929
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001930 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1931 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1932 !ieee80211_is_data(hdr->frame_control))
1933 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1934
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001935 setup_frame_info(hw, skb, frmlen);
1936
1937 /*
1938 * At this point, the vif, hw_key and sta pointers in the tx control
1939 * info are no longer valid (overwritten by the ath_frame_info data).
1940 */
1941
Felix Fietkau066dae92010-11-07 14:59:39 +01001942 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001943 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001944 if (txq == sc->tx.txq_map[q] &&
1945 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001946 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001947 txq->stopped = 1;
1948 }
1949 spin_unlock_bh(&txq->axq_lock);
1950
Felix Fietkau44f1d262011-08-28 00:32:25 +02001951 ath_tx_start_dma(sc, skb, txctl);
1952 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001953}
1954
Sujithe8324352009-01-16 21:38:42 +05301955/*****************/
1956/* TX Completion */
1957/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001958
Sujithe8324352009-01-16 21:38:42 +05301959static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301960 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001961{
Sujithe8324352009-01-16 21:38:42 +05301962 struct ieee80211_hw *hw = sc->hw;
1963 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001964 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001965 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001966 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301967
Joe Perches226afe62010-12-02 19:12:37 -08001968 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301969
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301970 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301971 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301972
Felix Fietkau55797b12011-09-14 21:24:16 +02001973 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301974 /* Frame was ACKed */
1975 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301976
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001977 padpos = ath9k_cmn_padpos(hdr->frame_control);
1978 padsize = padpos & 3;
1979 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301980 /*
1981 * Remove MAC header padding before giving the frame back to
1982 * mac80211.
1983 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001984 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301985 skb_pull(skb, padsize);
1986 }
1987
Sujith1b04b932010-01-08 10:36:05 +05301988 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1989 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001990 ath_dbg(common, ATH_DBG_PS,
1991 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301992 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1993 PS_WAIT_FOR_CAB |
1994 PS_WAIT_FOR_PSPOLL_DATA |
1995 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001996 }
1997
Felix Fietkau7545daf2011-01-24 19:23:16 +01001998 q = skb_get_queue_mapping(skb);
1999 if (txq == sc->tx.txq_map[q]) {
2000 spin_lock_bh(&txq->axq_lock);
2001 if (WARN_ON(--txq->pending_frames < 0))
2002 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01002003
Felix Fietkau7545daf2011-01-24 19:23:16 +01002004 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2005 ieee80211_wake_queue(sc->hw, q);
2006 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01002007 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002008 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002009 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002010
2011 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05302012}
2013
2014static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002015 struct ath_txq *txq, struct list_head *bf_q,
2016 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05302017{
2018 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05302019 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302020 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302021
Sujithe8324352009-01-16 21:38:42 +05302022 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302023 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302024
Felix Fietkau55797b12011-09-14 21:24:16 +02002025 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302026 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302027
Ben Greearc1739eb2010-10-14 12:45:29 -07002028 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002029 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002030
2031 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302032 if (time_after(jiffies,
2033 bf->bf_state.bfs_paprd_timestamp +
2034 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002035 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002036 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002037 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002038 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002039 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302040 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002041 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002042 /* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
2043 * accidentally reference it later.
2044 */
2045 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302046
2047 /*
2048 * Return the ath_buf list of this mpdu to the free queue
2049 */
2050 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2051 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2052 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2053}
2054
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002055static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2056 struct ath_tx_status *ts, int nframes, int nbad,
2057 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302058{
Sujitha22be222009-03-30 15:28:36 +05302059 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302060 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302061 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002062 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002063 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302064 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302065
Sujith95e4acb2009-03-13 08:56:09 +05302066 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002067 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302068
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002069 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302070 WARN_ON(tx_rateindex >= hw->max_rates);
2071
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002072 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302073 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002074 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002075 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302076
Felix Fietkaub572d032010-11-14 15:20:07 +01002077 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002078
Felix Fietkaub572d032010-11-14 15:20:07 +01002079 tx_info->status.ampdu_len = nframes;
2080 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002081 }
2082
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002083 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302084 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002085 /*
2086 * If an underrun error is seen assume it as an excessive
2087 * retry only if max frame trigger level has been reached
2088 * (2 KB for single stream, and 4 KB for dual stream).
2089 * Adjust the long retry as if the frame was tried
2090 * hw->max_rate_tries times to affect how rate control updates
2091 * PER for the failed rate.
2092 * In case of congestion on the bus penalizing this type of
2093 * underruns should help hardware actually transmit new frames
2094 * successfully by eventually preferring slower rates.
2095 * This itself should also alleviate congestion on the bus.
2096 */
2097 if (ieee80211_is_data(hdr->frame_control) &&
2098 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2099 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002100 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002101 tx_info->status.rates[tx_rateindex].count =
2102 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302103 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302104
Felix Fietkau545750d2009-11-23 22:21:01 +01002105 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302106 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002107 tx_info->status.rates[i].idx = -1;
2108 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302109
Felix Fietkau78c46532010-06-25 01:26:16 +02002110 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302111}
2112
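/*
 * Common completion path for the legacy and EDMA tx paths: update the
 * queue depth counters, complete the frame (aggregate or single) with
 * axq_lock dropped, then give the aggregation scheduler another chance
 * to run.
 */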
Felix Fietkaufce041b2011-05-19 12:20:25 +02002113static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2114 struct ath_tx_status *ts, struct ath_buf *bf,
2115 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302116 __releases(txq->axq_lock)
2117 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002118{
2119 int txok;
2120
2121 txq->axq_depth--;
2122 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2123 txq->axq_tx_inprogress = false;
2124 if (bf_is_ampdu_not_probing(bf))
2125 txq->axq_ampdu_depth--;
2126
2127 spin_unlock_bh(&txq->axq_lock);
2128
2129 if (!bf_isampdu(bf)) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002130 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2131 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2132 } else
2133 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2134
2135 spin_lock_bh(&txq->axq_lock);
2136
2137 if (sc->sc_flags & SC_OP_TXAGGR)
2138 ath_txq_schedule(sc, txq);
2139}
2140
Sujithc4288392008-11-18 09:09:30 +05302141static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142{
Sujithcbe61d82009-02-09 13:27:12 +05302143 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002144 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2146 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302147 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002148 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149 int status;
2150
Joe Perches226afe62010-12-02 19:12:37 -08002151 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2152 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2153 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154
Felix Fietkaufce041b2011-05-19 12:20:25 +02002155 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002157 if (work_pending(&sc->hw_reset_work))
2158 break;
2159
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002160 if (list_empty(&txq->axq_q)) {
2161 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002162 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002163 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002164 break;
2165 }
2166 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2167
2168 /*
2169 * There is a race condition where a BH gets scheduled
2170 * after sw writes TxE and before hw re-loads the last
2171 * descriptor to get the newly chained one.
2172 * Software must keep the last DONE descriptor as a
2173 * holding descriptor - software does so by marking
2174 * it with the STALE flag.
2175 */
2176 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302177 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002179 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002181
2182 bf = list_entry(bf_held->list.next, struct ath_buf,
2183 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184 }
2185
2186 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302187 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002188
Felix Fietkau29bffa92010-03-29 20:14:23 -07002189 memset(&ts, 0, sizeof(ts));
2190 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002191 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002193
Ben Greear2dac4fb2011-01-09 23:11:45 -08002194 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195
2196 /*
2197 * Remove the ath_buf's of the same transmit unit from txq,
2198 * but leave the last descriptor behind as the holding
2199 * descriptor for hw.
2200 */
Sujitha119cc42009-03-30 15:28:38 +05302201 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002202 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002203 if (!list_is_singular(&lastbf->list))
2204 list_cut_position(&bf_head,
2205 &txq->axq_q, lastbf->list.prev);
2206
Felix Fietkaufce041b2011-05-19 12:20:25 +02002207 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002208 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002209 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002210 }
Johannes Berge6a98542008-10-21 12:40:02 +02002211
Felix Fietkaufce041b2011-05-19 12:20:25 +02002212 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002213 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002214 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215}
2216
Sujith305fe472009-07-23 15:32:29 +05302217static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002218{
2219 struct ath_softc *sc = container_of(work, struct ath_softc,
2220 tx_complete_work.work);
2221 struct ath_txq *txq;
2222 int i;
2223 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002224#ifdef CONFIG_ATH9K_DEBUGFS
2225 sc->tx_complete_poll_work_seen++;
2226#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002227
2228 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2229 if (ATH_TXQ_SETUP(sc, i)) {
2230 txq = &sc->tx.txq[i];
2231 spin_lock_bh(&txq->axq_lock);
2232 if (txq->axq_depth) {
2233 if (txq->axq_tx_inprogress) {
2234 needreset = true;
2235 spin_unlock_bh(&txq->axq_lock);
2236 break;
2237 } else {
2238 txq->axq_tx_inprogress = true;
2239 }
2240 }
2241 spin_unlock_bh(&txq->axq_lock);
2242 }
2243
2244 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002245 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2246 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002247 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002248 }
2249
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002250 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002251 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2252}
2253
2254
Sujithe8324352009-01-16 21:38:42 +05302255
2256void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002257{
Sujithe8324352009-01-16 21:38:42 +05302258 int i;
2259 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002260
Sujithe8324352009-01-16 21:38:42 +05302261 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002262
2263 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302264 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2265 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002266 }
2267}
2268
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002269void ath_tx_edma_tasklet(struct ath_softc *sc)
2270{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002271 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002272 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2273 struct ath_hw *ah = sc->sc_ah;
2274 struct ath_txq *txq;
2275 struct ath_buf *bf, *lastbf;
2276 struct list_head bf_head;
2277 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002278
2279 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002280 if (work_pending(&sc->hw_reset_work))
2281 break;
2282
Felix Fietkaufce041b2011-05-19 12:20:25 +02002283 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284 if (status == -EINPROGRESS)
2285 break;
2286 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002287 ath_dbg(common, ATH_DBG_XMIT,
2288 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002289 break;
2290 }
2291
2292 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002293 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294 continue;
2295
Felix Fietkaufce041b2011-05-19 12:20:25 +02002296 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002297
2298 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002299
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002300 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2301 spin_unlock_bh(&txq->axq_lock);
2302 return;
2303 }
2304
2305 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2306 struct ath_buf, list);
2307 lastbf = bf->bf_lastbf;
2308
2309 INIT_LIST_HEAD(&bf_head);
2310 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2311 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002312
Felix Fietkaufce041b2011-05-19 12:20:25 +02002313 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2314 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002315
Felix Fietkaufce041b2011-05-19 12:20:25 +02002316 if (!list_empty(&txq->axq_q)) {
2317 struct list_head bf_q;
2318
2319 INIT_LIST_HEAD(&bf_q);
2320 txq->axq_link = NULL;
2321 list_splice_tail_init(&txq->axq_q, &bf_q);
2322 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2323 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002324 }
2325
Felix Fietkaufce041b2011-05-19 12:20:25 +02002326 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002327 spin_unlock_bh(&txq->axq_lock);
2328 }
2329}
2330
Sujithe8324352009-01-16 21:38:42 +05302331/*****************/
2332/* Init, Cleanup */
2333/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002334
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002335static int ath_txstatus_setup(struct ath_softc *sc, int size)
2336{
2337 struct ath_descdma *dd = &sc->txsdma;
2338 u8 txs_len = sc->sc_ah->caps.txs_len;
2339
2340 dd->dd_desc_len = size * txs_len;
2341 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2342 &dd->dd_desc_paddr, GFP_KERNEL);
2343 if (!dd->dd_desc)
2344 return -ENOMEM;
2345
2346 return 0;
2347}
2348
2349static int ath_tx_edma_init(struct ath_softc *sc)
2350{
2351 int err;
2352
2353 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2354 if (!err)
2355 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2356 sc->txsdma.dd_desc_paddr,
2357 ATH_TXSTATUS_RING_SIZE);
2358
2359 return err;
2360}
2361
2362static void ath_tx_edma_cleanup(struct ath_softc *sc)
2363{
2364 struct ath_descdma *dd = &sc->txsdma;
2365
2366 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2367 dd->dd_desc_paddr);
2368}
2369
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	/* The success path also falls through here, with error == 0. */
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

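/* Undo ath_tx_init(): release whatever descriptor memory was allocated. */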
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

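/*
 * Initialize per-station TX state: reset the aggregation (block-ack
 * window) bookkeeping of every TID and map each access category to its
 * hardware transmit queue.
 */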
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

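/*
 * Tear down per-station TX state when a node goes away: unschedule its
 * TIDs and access categories, drain any frames still queued per TID and
 * clear the aggregation state, all under the corresponding txq lock.
 */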
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}