/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
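/*
 * For illustration: an OFDM/HT symbol is 4 us with the full guard
 * interval and 3.6 us with the short (half) GI, so e.g.
 * SYMBOL_TIME(10) = 40 us and SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us.
 * The NUM_SYMBOLS_PER_USEC*() macros are the (integer, rounded-down)
 * inverses used for the MPDU density conversion in ath_compute_num_delims().
 */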

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
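/*
 * The table above is indexed as ath_max_4ms_framelen[mode][mcs] (see
 * ath_lookup_rate()) and holds, per rate, roughly the number of bytes
 * that fit into a 4 ms transmit duration, capped at 65532 to stay below
 * the 16-bit aggregate length limit noted in ath_lookup_rate().  For
 * illustration: MCS 7 HT20 with the long GI carries 260 bits per 4 us
 * symbol (see bits_per_symbol above), i.e. ~65 Mbit/s, and 4 ms at that
 * rate is roughly 32500 bytes; the tabulated value is the slightly
 * smaller 32172.
 */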

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
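/*
 * The two helpers above maintain the software block-ack window (BAW):
 * tid->tx_buf is a circular bitmap of ATH_TID_MAX_BUFS entries indexed
 * relative to tid->baw_head.  ath_tx_addto_baw() marks a sequence number
 * as in-flight (growing baw_tail when needed), and ath_tx_update_baw()
 * clears it on completion and then slides seq_start/baw_head forward
 * past any leading run of already-completed entries.
 */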

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
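/*
 * Note on the limit above: when BT priority traffic is detected the
 * 4 ms budget is scaled by 3/8, i.e. aggregates are sized for roughly
 * 1.5 ms of airtime, before the usual ATH_AMPDU_LIMIT_MAX and
 * per-station maxampdu clamping.
 */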

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
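/*
 * Worked example for the density conversion above (illustrative values):
 * with an MPDU density of 8 us, short GI, MCS 7 at 40 MHz and a single
 * stream, nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(8) = 2 and
 * nsymbits = bits_per_symbol[7][1] * 1 = 540, giving
 * minlen = 2 * 540 / 8 = 135 bytes.  A shorter subframe is then padded
 * with (minlen - frmlen) / ATH_AGGR_DELIM_SZ additional delimiters.
 */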

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type |= BUF_AMPDU;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
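/*
 * In the loop above, 'al' tracks the on-air aggregate length: every
 * subframe contributes ATH_AGGR_DELIM_SZ + its own length (al_delta)
 * plus the padding owed by the previous subframe (bpad), which is the
 * 4-byte alignment pad plus ndelim delimiters of 4 bytes each
 * (ndelim << 2).  The resulting *aggr_len is what ath_tx_sched_aggr()
 * passes to ath_buf_set_rate() when setting up the aggregate.
 */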

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

1246/*
1247 * Drain a given TX queue (could be Beacon or Data)
1248 *
1249 * This assumes output has been stopped and
1250 * we do not need to block ath_tx_tasklet.
1251 */
1252void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1253{
1254 spin_lock_bh(&txq->axq_lock);
1255 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1256 int idx = txq->txq_tailidx;
1257
1258 while (!list_empty(&txq->txq_fifo[idx])) {
1259 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1260 retry_tx);
1261
1262 INCR(idx, ATH_TXFIFO_DEPTH);
1263 }
1264 txq->txq_tailidx = idx;
1265 }
1266
1267 txq->axq_link = NULL;
1268 txq->axq_tx_inprogress = false;
1269 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001270
1271 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001272 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1273 ath_txq_drain_pending_buffers(sc, txq);
1274
1275 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301276}
1277
Felix Fietkau080e1a22010-12-05 20:17:53 +01001278bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301279{
Sujithcbe61d82009-02-09 13:27:12 +05301280 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001281 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301282 struct ath_txq *txq;
1283 int i, npend = 0;
1284
1285 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001286 return true;
Sujith043a0402009-01-16 21:38:47 +05301287
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001288 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301289
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001290 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301291 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001292 if (!ATH_TXQ_SETUP(sc, i))
1293 continue;
1294
1295 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301296 }
1297
Felix Fietkau080e1a22010-12-05 20:17:53 +01001298 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001299 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301300
1301 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001302 if (!ATH_TXQ_SETUP(sc, i))
1303 continue;
1304
1305 /*
1306 * The caller will resume queues with ieee80211_wake_queues.
1307 * Mark the queue as not stopped to prevent ath_tx_complete
1308 * from waking the queue too early.
1309 */
1310 txq = &sc->tx.txq[i];
1311 txq->stopped = false;
1312 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301313 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001314
1315 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301316}
1317
Sujithe8324352009-01-16 21:38:42 +05301318void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1319{
1320 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1321 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1322}
1323
Ben Greear7755bad2011-01-18 17:30:00 -08001324/* For each axq_acq entry, for each tid, try to schedule packets
1325 * for transmission until ampdu_depth has reached the minimum queue depth.
1326 */
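/*
 * A rough sketch of the walk below: ACs are taken from axq_acq in
 * round-robin order and, within each AC, TIDs are taken from tid_q in
 * round-robin order.  ath_tx_sched_aggr() is run for each unpaused TID,
 * ACs/TIDs that still have queued frames are re-added at the tail, and
 * the walk stops early once axq_ampdu_depth reaches ATH_AGGR_MIN_QDEPTH.
 */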
Sujithe8324352009-01-16 21:38:42 +05301327void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1328{
Ben Greear7755bad2011-01-18 17:30:00 -08001329 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1330 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301331
Felix Fietkau236de512011-09-03 01:40:25 +02001332 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001333 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301334 return;
1335
1336 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001337 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301338
Ben Greear7755bad2011-01-18 17:30:00 -08001339 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1340 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1341 list_del(&ac->list);
1342 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301343
Ben Greear7755bad2011-01-18 17:30:00 -08001344 while (!list_empty(&ac->tid_q)) {
1345 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1346 list);
1347 list_del(&tid->list);
1348 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301349
Ben Greear7755bad2011-01-18 17:30:00 -08001350 if (tid->paused)
1351 continue;
Sujithe8324352009-01-16 21:38:42 +05301352
Ben Greear7755bad2011-01-18 17:30:00 -08001353 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301354
Ben Greear7755bad2011-01-18 17:30:00 -08001355 /*
1356 * add tid to round-robin queue if more frames
1357 * are pending for the tid
1358 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001359 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001360 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301361
Ben Greear7755bad2011-01-18 17:30:00 -08001362 if (tid == last_tid ||
1363 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1364 break;
Sujithe8324352009-01-16 21:38:42 +05301365 }
Ben Greear7755bad2011-01-18 17:30:00 -08001366
1367 if (!list_empty(&ac->tid_q)) {
1368 if (!ac->sched) {
1369 ac->sched = true;
1370 list_add_tail(&ac->list, &txq->axq_acq);
1371 }
1372 }
1373
1374 if (ac == last_ac ||
1375 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1376 return;
Sujithe8324352009-01-16 21:38:42 +05301377 }
1378}
1379
Sujithe8324352009-01-16 21:38:42 +05301380/***********/
1381/* TX, DMA */
1382/***********/
1383
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001384/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001385 * Insert a chain of ath_buf (descriptors) on a txq and
1386 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001387 */
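/*
 * In outline: on EDMA (AR9003-family) hardware the chain is placed in the
 * current txq_fifo slot and handed to the hardware via ath9k_hw_puttxbuf()
 * when that slot is empty; on older hardware it is appended to axq_q,
 * linked to the previous tail descriptor with ath9k_hw_set_desc_link(),
 * and the queue is kicked with ath9k_hw_txstart().
 */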
Sujith102e0572008-10-29 10:15:16 +05301388static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001389 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001390{
Sujithcbe61d82009-02-09 13:27:12 +05301391 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001392 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001393 struct ath_buf *bf, *bf_last;
1394 bool puttxbuf = false;
1395 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301396
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001397 /*
1398 * Insert the frame on the outbound list and
1399 * pass it on to the hardware.
1400 */
1401
1402 if (list_empty(head))
1403 return;
1404
Felix Fietkaufce041b2011-05-19 12:20:25 +02001405 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001406 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001407 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001408
Joe Perches226afe62010-12-02 19:12:37 -08001409 ath_dbg(common, ATH_DBG_QUEUE,
1410 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001411
Felix Fietkaufce041b2011-05-19 12:20:25 +02001412 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1413 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001414 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001416 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001417 list_splice_tail_init(head, &txq->axq_q);
1418
Felix Fietkaufce041b2011-05-19 12:20:25 +02001419 if (txq->axq_link) {
1420 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001421 ath_dbg(common, ATH_DBG_XMIT,
1422 "link[%u] (%p)=%llx (%p)\n",
1423 txq->axq_qnum, txq->axq_link,
1424 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001425 } else if (!edma)
1426 puttxbuf = true;
1427
1428 txq->axq_link = bf_last->bf_desc;
1429 }
1430
1431 if (puttxbuf) {
1432 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1433 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1434 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1435 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1436 }
1437
1438 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001439 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001440 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001441 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001442
1443 if (!internal) {
1444 txq->axq_depth++;
1445 if (bf_is_ampdu_not_probing(bf))
1446 txq->axq_ampdu_depth++;
1447 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001448}
1449
Sujithe8324352009-01-16 21:38:42 +05301450static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001451 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301452{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001453 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001454 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001455 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301456
1457 /*
1458 * Do not queue to h/w when any of the following conditions is true:
1459 * - there are pending frames in software queue
1460 * - the TID is currently paused for ADDBA/BAR request
1461 * - seqno is not within block-ack window
1462 * - h/w queue depth exceeds low water mark
1463 */
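	/*
	 * (BAW_WITHIN() checks, roughly, that (seq_next - seq_start) modulo
	 * the 12-bit sequence space is smaller than the negotiated baw_size.)
	 */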
Felix Fietkau56dc6332011-08-28 00:32:22 +02001464 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001465 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001466 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001467 /*
Sujithe8324352009-01-16 21:38:42 +05301468 * Add this frame to the software queue, to be scheduled
1469 * later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001470 */
Ben Greearbda8add2011-01-09 23:11:48 -08001471 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001472 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001473 if (!txctl->an || !txctl->an->sleeping)
1474 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301475 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001476 }
1477
Felix Fietkau44f1d262011-08-28 00:32:25 +02001478 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1479 if (!bf)
1480 return;
1481
1482 bf->bf_state.bf_type |= BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001483 INIT_LIST_HEAD(&bf_head);
1484 list_add(&bf->list, &bf_head);
1485
Sujithe8324352009-01-16 21:38:42 +05301486 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001487 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301488
1489 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001490 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301491 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001492 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001493 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301494}
1495
Felix Fietkau82b873a2010-11-11 03:18:37 +01001496static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001497 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001499 struct ath_frame_info *fi = get_frame_info(skb);
1500 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301501 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502
Felix Fietkau44f1d262011-08-28 00:32:25 +02001503 bf = fi->bf;
1504 if (!bf)
1505 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1506
1507 if (!bf)
1508 return;
1509
1510 INIT_LIST_HEAD(&bf_head);
1511 list_add_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301512 bf->bf_state.bf_type &= ~BUF_AMPDU;
1513
1514 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001515 if (tid)
1516 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301517
Sujithd43f30152009-01-16 21:38:53 +05301518 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001519 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001520 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301521 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522}
1523
Sujith528f0c62008-10-29 10:14:26 +05301524static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525{
Sujith528f0c62008-10-29 10:14:26 +05301526 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001527 enum ath9k_pkt_type htype;
1528 __le16 fc;
1529
Sujith528f0c62008-10-29 10:14:26 +05301530 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001531 fc = hdr->frame_control;
1532
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001533 if (ieee80211_is_beacon(fc))
1534 htype = ATH9K_PKT_TYPE_BEACON;
1535 else if (ieee80211_is_probe_resp(fc))
1536 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1537 else if (ieee80211_is_atim(fc))
1538 htype = ATH9K_PKT_TYPE_ATIM;
1539 else if (ieee80211_is_pspoll(fc))
1540 htype = ATH9K_PKT_TYPE_PSPOLL;
1541 else
1542 htype = ATH9K_PKT_TYPE_NORMAL;
1543
1544 return htype;
1545}
1546
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001547static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1548 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301549{
1550 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001551 struct ieee80211_sta *sta = tx_info->control.sta;
1552 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001553 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001554 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001555 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001556 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301557
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001558 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301559
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001560 if (sta)
1561 an = (struct ath_node *) sta->drv_priv;
1562
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001563 memset(fi, 0, sizeof(*fi));
1564 if (hw_key)
1565 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001566 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1567 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001568 else
1569 fi->keyix = ATH9K_TXKEYIX_INVALID;
1570 fi->keytype = keytype;
1571 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301572}
1573
Felix Fietkau82b873a2010-11-11 03:18:37 +01001574static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301575{
1576 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1577 int flags = 0;
1578
Sujith528f0c62008-10-29 10:14:26 +05301579 flags |= ATH9K_TXDESC_INTREQ;
1580
1581 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1582 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301583
Felix Fietkau82b873a2010-11-11 03:18:37 +01001584 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001585 flags |= ATH9K_TXDESC_LDPC;
1586
Sujith528f0c62008-10-29 10:14:26 +05301587 return flags;
1588}
1589
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001590/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001591 * rix - rate index
1592 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1593 * width - 0 for 20 MHz, 1 for 40 MHz
1594 * half_gi - to use 4us v/s 3.6 us for symbol time
1595 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001596static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301597 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001598{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001599 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001600 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301601
1602 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001603 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001605 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001606 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1607
1608 if (!half_gi)
1609 duration = SYMBOL_TIME(nsymbols);
1610 else
1611 duration = SYMBOL_TIME_HALFGI(nsymbols);
1612
Sujithe63835b2008-11-18 09:07:53 +05301613 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001614 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301615
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001616 return duration;
1617}
1618
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301619u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1620{
1621 struct ath_hw *ah = sc->sc_ah;
1622 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301623 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1624 (curchan->channelFlags & CHANNEL_5GHZ) &&
1625 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301626 return 0x3;
1627 else
1628 return chainmask;
1629}
1630
Felix Fietkau269c44b2010-11-14 15:20:06 +01001631static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001632{
Felix Fietkau82b2d332011-09-03 01:40:23 +02001633 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001634 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301635 struct sk_buff *skb;
1636 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301637 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001638 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301639 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301640 int i, flags = 0;
1641 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301642 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301643
1644 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301645
Sujitha22be222009-03-30 15:28:36 +05301646 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301647 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301648 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301649 hdr = (struct ieee80211_hdr *)skb->data;
1650 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301651
Sujithc89424d2009-01-30 14:29:28 +05301652 /*
1653 * We check if Short Preamble is needed for the CTS rate by
1654 * checking the BSS's global flag.
1655 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1656 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001657 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1658 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301659 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001660 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001661
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001662 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001663 bool is_40, is_sgi, is_sp;
1664 int phy;
1665
Sujithe63835b2008-11-18 09:07:53 +05301666 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001667 continue;
1668
Sujitha8efee42008-11-18 09:07:30 +05301669 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301670 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001671
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301672 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301673 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001674 flags |= ATH9K_TXDESC_RTSENA;
1675 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1676 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1677 flags |= ATH9K_TXDESC_CTSENA;
1678 }
1679
Sujithc89424d2009-01-30 14:29:28 +05301680 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1681 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1682 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1683 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001684
Felix Fietkau545750d2009-11-23 22:21:01 +01001685 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1686 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1687 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1688
1689 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1690 /* MCS rates */
1691 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301692 series[i].ChSel = ath_txchainmask_reduction(sc,
Felix Fietkau82b2d332011-09-03 01:40:23 +02001693 ah->txchainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001694 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001695 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001696 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1697 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001698 continue;
1699 }
1700
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301701 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001702 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1703 !(rate->flags & IEEE80211_RATE_ERP_G))
1704 phy = WLAN_RC_PHY_CCK;
1705 else
1706 phy = WLAN_RC_PHY_OFDM;
1707
1708 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1709 series[i].Rate = rate->hw_value;
1710 if (rate->hw_value_short) {
1711 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1712 series[i].Rate |= rate->hw_value_short;
1713 } else {
1714 is_sp = false;
1715 }
1716
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301717 if (bf->bf_state.bfs_paprd)
Felix Fietkau82b2d332011-09-03 01:40:23 +02001718 series[i].ChSel = ah->txchainmask;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301719 else
1720 series[i].ChSel = ath_txchainmask_reduction(sc,
Felix Fietkau82b2d332011-09-03 01:40:23 +02001721 ah->txchainmask, series[i].Rate);
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301722
Felix Fietkau545750d2009-11-23 22:21:01 +01001723 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001724 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001725 }
1726
Felix Fietkau27032052010-01-17 21:08:50 +01001727 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001728 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001729 flags &= ~ATH9K_TXDESC_RTSENA;
1730
1731 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1732 if (flags & ATH9K_TXDESC_RTSENA)
1733 flags &= ~ATH9K_TXDESC_CTSENA;
1734
Sujithe63835b2008-11-18 09:07:53 +05301735 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301736 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1737 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301738 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301739 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301740
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001741}
1742
Felix Fietkau44f1d262011-08-28 00:32:25 +02001743/*
1744 * Assign a descriptor (and a sequence number if necessary)
1745 * and map the buffer for DMA.  Frees the skb on error.
1746 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001747static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001748 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001749 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001750 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301751{
Felix Fietkau04caf862010-11-14 15:20:12 +01001752 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001753 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001754 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001756 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001757 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001758 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001759 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001760
1761 bf = ath_tx_get_buffer(sc);
1762 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001763 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001764 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001765 }
Sujithe8324352009-01-16 21:38:42 +05301766
Sujithe8324352009-01-16 21:38:42 +05301767 ATH_TXBUF_RESET(bf);
1768
Felix Fietkaufa05f872011-08-28 00:32:24 +02001769 if (tid) {
1770 seqno = tid->seq_next;
1771 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1772 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1773 bf->bf_state.seqno = seqno;
1774 }
1775
Felix Fietkau82b873a2010-11-11 03:18:37 +01001776 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301777 bf->bf_mpdu = skb;
1778
Ben Greearc1739eb32010-10-14 12:45:29 -07001779 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1780 skb->len, DMA_TO_DEVICE);
1781 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301782 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001783 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001784 ath_err(ath9k_hw_common(sc->sc_ah),
1785 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001786 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001787 goto error;
Sujithe8324352009-01-16 21:38:42 +05301788 }
1789
Sujithe8324352009-01-16 21:38:42 +05301790 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301791
1792 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001793 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301794
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001795 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1796 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301797
1798 ath9k_hw_filltxdesc(ah, ds,
1799 skb->len, /* segment length */
1800 true, /* first segment */
1801 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001802 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001803 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001804 txq->axq_qnum);
1805
Felix Fietkau56dc6332011-08-28 00:32:22 +02001806 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001807
1808 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001809
1810error:
1811 dev_kfree_skb_any(skb);
1812 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001813}
1814
1815/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001816static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001817 struct ath_tx_control *txctl)
1818{
Felix Fietkau04caf862010-11-14 15:20:12 +01001819 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1820 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001821 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001822 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001823 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301824
Sujithe8324352009-01-16 21:38:42 +05301825 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301826 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1827 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001828 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1829 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001830 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001831
Felix Fietkau066dae92010-11-07 14:59:39 +01001832 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001833 }
1834
1835 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001836 /*
1837 * Try aggregation if it's a unicast data frame
1838 * and the destination is HT capable.
1839 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001840 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301841 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001842 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1843 if (!bf)
1844 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001845
Felix Fietkau82b873a2010-11-11 03:18:37 +01001846 bf->bf_state.bfs_paprd = txctl->paprd;
1847
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001848 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001849 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1850 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001851
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301852 if (txctl->paprd)
1853 bf->bf_state.bfs_paprd_timestamp = jiffies;
1854
Felix Fietkau55195412011-04-17 23:28:09 +02001855 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1856 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1857
Felix Fietkau44f1d262011-08-28 00:32:25 +02001858 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301859 }
1860
Felix Fietkaufa05f872011-08-28 00:32:24 +02001861out:
Sujithe8324352009-01-16 21:38:42 +05301862 spin_unlock_bh(&txctl->txq->axq_lock);
1863}
1864
1865/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001866int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301867 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001868{
Felix Fietkau28d16702010-11-14 15:20:10 +01001869 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1870 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001871 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001872 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001873 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001874 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001875 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001876 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001877 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001878
Ben Greeara9927ba2010-12-06 21:13:49 -08001879 /* NOTE: sta can be NULL according to net/mac80211.h */
1880 if (sta)
1881 txctl->an = (struct ath_node *)sta->drv_priv;
1882
Felix Fietkau04caf862010-11-14 15:20:12 +01001883 if (info->control.hw_key)
1884 frmlen += info->control.hw_key->icv_len;
1885
Felix Fietkau28d16702010-11-14 15:20:10 +01001886 /*
1887 * As a temporary workaround, assign seq# here; this will likely need
1888 * to be cleaned up to work better with Beacon transmission and virtual
1889 * BSSes.
1890 */
1891 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1892 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1893 sc->tx.seq_no += 0x10;
1894 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1895 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1896 }
1897
1898 /* Add the padding after the header if this is not already done */
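	/*
	 * For example, a QoS data header is 26 bytes, giving padpos = 26 and
	 * padsize = 2; the header is then shifted forward so that the frame
	 * body starts on a 4-byte boundary (exact values depend on what
	 * ath9k_cmn_padpos() returns for the given frame_control).
	 */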
1899 padpos = ath9k_cmn_padpos(hdr->frame_control);
1900 padsize = padpos & 3;
1901 if (padsize && skb->len > padpos) {
1902 if (skb_headroom(skb) < padsize)
1903 return -ENOMEM;
1904
1905 skb_push(skb, padsize);
1906 memmove(skb->data, skb->data + padsize, padpos);
1907 }
1908
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001909 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1910 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1911 !ieee80211_is_data(hdr->frame_control))
1912 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1913
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001914 setup_frame_info(hw, skb, frmlen);
1915
1916 /*
1917 * At this point, the vif, hw_key and sta pointers in the tx control
1918 * info are no longer valid (overwritten by the ath_frame_info data).
1919 */
1920
Felix Fietkau066dae92010-11-07 14:59:39 +01001921 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001922 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001923 if (txq == sc->tx.txq_map[q] &&
1924 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001925 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001926 txq->stopped = 1;
1927 }
1928 spin_unlock_bh(&txq->axq_lock);
1929
Felix Fietkau44f1d262011-08-28 00:32:25 +02001930 ath_tx_start_dma(sc, skb, txctl);
1931 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001932}
1933
Sujithe8324352009-01-16 21:38:42 +05301934/*****************/
1935/* TX Completion */
1936/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001937
Sujithe8324352009-01-16 21:38:42 +05301938static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301939 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001940{
Sujithe8324352009-01-16 21:38:42 +05301941 struct ieee80211_hw *hw = sc->hw;
1942 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001943 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001944 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001945 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301946
Joe Perches226afe62010-12-02 19:12:37 -08001947 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301948
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301949 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301950 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301951
Felix Fietkau55797b12011-09-14 21:24:16 +02001952 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301953 /* Frame was ACKed */
1954 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301955
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001956 padpos = ath9k_cmn_padpos(hdr->frame_control);
1957 padsize = padpos & 3;
1958 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301959 /*
1960 * Remove MAC header padding before giving the frame back to
1961 * mac80211.
1962 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001963 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301964 skb_pull(skb, padsize);
1965 }
1966
Sujith1b04b932010-01-08 10:36:05 +05301967 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1968 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001969 ath_dbg(common, ATH_DBG_PS,
1970 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301971 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1972 PS_WAIT_FOR_CAB |
1973 PS_WAIT_FOR_PSPOLL_DATA |
1974 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001975 }
1976
Felix Fietkau7545daf2011-01-24 19:23:16 +01001977 q = skb_get_queue_mapping(skb);
1978 if (txq == sc->tx.txq_map[q]) {
1979 spin_lock_bh(&txq->axq_lock);
1980 if (WARN_ON(--txq->pending_frames < 0))
1981 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001982
Felix Fietkau7545daf2011-01-24 19:23:16 +01001983 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1984 ieee80211_wake_queue(sc->hw, q);
1985 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001986 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001987 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001988 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001989
1990 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301991}
1992
1993static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994 struct ath_txq *txq, struct list_head *bf_q,
1995 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301996{
1997 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301998 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301999 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302000
Sujithe8324352009-01-16 21:38:42 +05302001 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302002 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302003
Felix Fietkau55797b12011-09-14 21:24:16 +02002004 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302005 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302006
Ben Greearc1739eb32010-10-14 12:45:29 -07002007 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002008 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002009
2010 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302011 if (time_after(jiffies,
2012 bf->bf_state.bfs_paprd_timestamp +
2013 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002014 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002015 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002016 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002017 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002018 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302019 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002020 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002021 /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
2022 * accidentally reference it later.
2023 */
2024 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302025
2026 /*
2027 * Return the list of ath_buf of this mpdu to the free queue
2028 */
2029 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2030 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2031 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2032}
2033
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002034static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2035 struct ath_tx_status *ts, int nframes, int nbad,
2036 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302037{
Sujitha22be222009-03-30 15:28:36 +05302038 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302039 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302040 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002041 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002042 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302043 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302044
Sujith95e4acb2009-03-13 08:56:09 +05302045 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002046 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302047
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002048 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302049 WARN_ON(tx_rateindex >= hw->max_rates);
2050
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002051 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302052 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002053 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002054 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302055
Felix Fietkaub572d032010-11-14 15:20:07 +01002056 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002057
Felix Fietkaub572d032010-11-14 15:20:07 +01002058 tx_info->status.ampdu_len = nframes;
2059 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002060 }
2061
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002062 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302063 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002064 /*
2065 * If an underrun error is seen, treat it as an excessive
2066 * retry only if the max frame trigger level has been reached
2067 * (2 KB for single stream, and 4 KB for dual stream).
2068 * Adjust the long retry as if the frame was tried
2069 * hw->max_rate_tries times to affect how rate control updates
2070 * PER for the failed rate.
2071 * In case of congestion on the bus, penalizing this type of
2072 * underrun should help the hardware actually transmit new frames
2073 * successfully by eventually preferring slower rates.
2074 * This itself should also alleviate congestion on the bus.
2075 */
2076 if (ieee80211_is_data(hdr->frame_control) &&
2077 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2078 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002079 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002080 tx_info->status.rates[tx_rateindex].count =
2081 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302082 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302083
Felix Fietkau545750d2009-11-23 22:21:01 +01002084 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302085 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002086 tx_info->status.rates[i].idx = -1;
2087 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302088
Felix Fietkau78c46532010-06-25 01:26:16 +02002089 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302090}
2091
Felix Fietkaufce041b2011-05-19 12:20:25 +02002092static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2093 struct ath_tx_status *ts, struct ath_buf *bf,
2094 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302095 __releases(txq->axq_lock)
2096 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002097{
2098 int txok;
2099
2100 txq->axq_depth--;
2101 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2102 txq->axq_tx_inprogress = false;
2103 if (bf_is_ampdu_not_probing(bf))
2104 txq->axq_ampdu_depth--;
2105
2106 spin_unlock_bh(&txq->axq_lock);
2107
2108 if (!bf_isampdu(bf)) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002109 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2110 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2111 } else
2112 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2113
2114 spin_lock_bh(&txq->axq_lock);
2115
2116 if (sc->sc_flags & SC_OP_TXAGGR)
2117 ath_txq_schedule(sc, txq);
2118}
2119
Sujithc4288392008-11-18 09:09:30 +05302120static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121{
Sujithcbe61d82009-02-09 13:27:12 +05302122 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002123 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2125 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302126 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002127 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 int status;
2129
Joe Perches226afe62010-12-02 19:12:37 -08002130 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2131 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2132 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133
Felix Fietkaufce041b2011-05-19 12:20:25 +02002134 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002136 if (work_pending(&sc->hw_reset_work))
2137 break;
2138
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 if (list_empty(&txq->axq_q)) {
2140 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002141 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002142 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143 break;
2144 }
2145 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2146
2147 /*
2148 * There is a race condition in which a BH gets scheduled
2149 * after sw writes TxE and before hw re-loads the last
2150 * descriptor to get the newly chained one.
2151 * Software must keep the last DONE descriptor as a
2152 * holding descriptor - software does so by marking
2153 * it with the STALE flag.
2154 */
2155 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302156 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002157 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002158 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002160
2161 bf = list_entry(bf_held->list.next, struct ath_buf,
2162 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 }
2164
2165 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302166 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167
Felix Fietkau29bffa92010-03-29 20:14:23 -07002168 memset(&ts, 0, sizeof(ts));
2169 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002172
Ben Greear2dac4fb2011-01-09 23:11:45 -08002173 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174
2175 /*
2176 * Remove the ath_bufs of the same transmit unit from txq;
2177 * however, leave the last descriptor back as the holding
2178 * descriptor for hw.
2179 */
Sujitha119cc42009-03-30 15:28:38 +05302180 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182 if (!list_is_singular(&lastbf->list))
2183 list_cut_position(&bf_head,
2184 &txq->axq_q, lastbf->list.prev);
2185
Felix Fietkaufce041b2011-05-19 12:20:25 +02002186 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002187 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002188 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002189 }
Johannes Berge6a98542008-10-21 12:40:02 +02002190
Felix Fietkaufce041b2011-05-19 12:20:25 +02002191 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002193 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194}
2195
Sujith305fe472009-07-23 15:32:29 +05302196static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002197{
2198 struct ath_softc *sc = container_of(work, struct ath_softc,
2199 tx_complete_work.work);
2200 struct ath_txq *txq;
2201 int i;
2202 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002203#ifdef CONFIG_ATH9K_DEBUGFS
2204 sc->tx_complete_poll_work_seen++;
2205#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002206
2207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2208 if (ATH_TXQ_SETUP(sc, i)) {
2209 txq = &sc->tx.txq[i];
2210 spin_lock_bh(&txq->axq_lock);
2211 if (txq->axq_depth) {
2212 if (txq->axq_tx_inprogress) {
2213 needreset = true;
2214 spin_unlock_bh(&txq->axq_lock);
2215 break;
2216 } else {
2217 txq->axq_tx_inprogress = true;
2218 }
2219 }
2220 spin_unlock_bh(&txq->axq_lock);
2221 }
2222
2223 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002224 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2225 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002226 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002227 }
2228
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002229 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002230 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2231}
2232
2233
Sujithe8324352009-01-16 21:38:42 +05302234
2235void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002236{
Sujithe8324352009-01-16 21:38:42 +05302237 int i;
2238 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002239
Sujithe8324352009-01-16 21:38:42 +05302240 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002241
2242 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302243 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2244 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002245 }
2246}
2247
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248void ath_tx_edma_tasklet(struct ath_softc *sc)
2249{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002250 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2252 struct ath_hw *ah = sc->sc_ah;
2253 struct ath_txq *txq;
2254 struct ath_buf *bf, *lastbf;
2255 struct list_head bf_head;
2256 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002257
2258 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002259 if (work_pending(&sc->hw_reset_work))
2260 break;
2261
Felix Fietkaufce041b2011-05-19 12:20:25 +02002262 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002263 if (status == -EINPROGRESS)
2264 break;
2265 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002266 ath_dbg(common, ATH_DBG_XMIT,
2267 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002268 break;
2269 }
2270
2271 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002272 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002273 continue;
2274
Felix Fietkaufce041b2011-05-19 12:20:25 +02002275 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002276
2277 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002278
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2280 spin_unlock_bh(&txq->axq_lock);
2281 return;
2282 }
2283
2284 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2285 struct ath_buf, list);
2286 lastbf = bf->bf_lastbf;
2287
2288 INIT_LIST_HEAD(&bf_head);
2289 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2290 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291
Felix Fietkaufce041b2011-05-19 12:20:25 +02002292 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2293 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294
Felix Fietkaufce041b2011-05-19 12:20:25 +02002295 if (!list_empty(&txq->axq_q)) {
2296 struct list_head bf_q;
2297
2298 INIT_LIST_HEAD(&bf_q);
2299 txq->axq_link = NULL;
2300 list_splice_tail_init(&txq->axq_q, &bf_q);
2301 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2302 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002303 }
2304
Felix Fietkaufce041b2011-05-19 12:20:25 +02002305 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002306 spin_unlock_bh(&txq->axq_lock);
2307 }
2308}
2309
Sujithe8324352009-01-16 21:38:42 +05302310/*****************/
2311/* Init, Cleanup */
2312/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002314static int ath_txstatus_setup(struct ath_softc *sc, int size)
2315{
2316 struct ath_descdma *dd = &sc->txsdma;
2317 u8 txs_len = sc->sc_ah->caps.txs_len;
2318
2319 dd->dd_desc_len = size * txs_len;
2320 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2321 &dd->dd_desc_paddr, GFP_KERNEL);
2322 if (!dd->dd_desc)
2323 return -ENOMEM;
2324
2325 return 0;
2326}
2327
2328static int ath_tx_edma_init(struct ath_softc *sc)
2329{
2330 int err;
2331
2332 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2333 if (!err)
2334 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2335 sc->txsdma.dd_desc_paddr,
2336 ATH_TXSTATUS_RING_SIZE);
2337
2338 return err;
2339}
2340
2341static void ath_tx_edma_cleanup(struct ath_softc *sc)
2342{
2343 struct ath_descdma *dd = &sc->txsdma;
2344
2345 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2346 dd->dd_desc_paddr);
2347}
2348
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349int ath_tx_init(struct ath_softc *sc, int nbufs)
2350{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002351 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352 int error = 0;
2353
Sujith797fe5cb2009-03-30 15:28:45 +05302354 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355
Sujith797fe5cb2009-03-30 15:28:45 +05302356 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002357 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302358 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002359 ath_err(common,
2360 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302361 goto err;
2362 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363
Sujith797fe5cb2009-03-30 15:28:45 +05302364 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002365 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302366 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002367 ath_err(common,
2368 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302369 goto err;
2370 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002372 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2373
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002374 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2375 error = ath_tx_edma_init(sc);
2376 if (error)
2377 goto err;
2378 }
2379
Sujith797fe5cb2009-03-30 15:28:45 +05302380err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381 if (error != 0)
2382 ath_tx_cleanup(sc);
2383
2384 return error;
2385}
2386
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

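/*
 * Descriptive comment (added for clarity): per-station TX state setup.
 * Resets every TID's sequence numbers, block-ack window and aggregation
 * flags, and attaches each TID and access category to its TX queue.
 */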
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

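/*
 * Descriptive comment (added for clarity): per-station TX teardown.
 * Unschedules each TID and its access category, drains any frames still
 * pending on the TID via ath_tid_drain() and clears the aggregation
 * state, all under the corresponding TX queue lock.
 */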
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}