blob: d7e3f8c0602e735cd8eedf59ef1012fb47b1dd49 [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujithcee075a2009-03-13 09:07:23 +05302 * Copyright (c) 2008-2009 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Sujith394cf0a2009-02-09 13:26:54 +053017#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040018#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070019
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070022#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070034
Felix Fietkauc6663872010-04-19 19:57:33 +020035static u16 bits_per_symbol[][2] = {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070036 /* 20MHz 40MHz */
37 { 26, 54 }, /* 0: BPSK */
38 { 52, 108 }, /* 1: QPSK 1/2 */
39 { 78, 162 }, /* 2: QPSK 3/4 */
40 { 104, 216 }, /* 3: 16-QAM 1/2 */
41 { 156, 324 }, /* 4: 16-QAM 3/4 */
42 { 208, 432 }, /* 5: 64-QAM 2/3 */
43 { 234, 486 }, /* 6: 64-QAM 3/4 */
44 { 260, 540 }, /* 7: 64-QAM 5/6 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070045};
46
47#define IS_HT_RATE(_rate) ((_rate) & 0x80)
48
Felix Fietkau82b873a2010-11-11 03:18:37 +010049static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
50 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +010051 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053052static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070053 struct ath_txq *txq, struct list_head *bf_q,
54 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053055static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
56 struct list_head *head);
Felix Fietkau269c44b2010-11-14 15:20:06 +010057static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010058static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020061static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
62 int seqno);
Sujithe8324352009-01-16 21:38:42 +053063
Felix Fietkau545750d2009-11-23 22:21:01 +010064enum {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020065 MCS_HT20,
66 MCS_HT20_SGI,
Felix Fietkau545750d2009-11-23 22:21:01 +010067 MCS_HT40,
68 MCS_HT40_SGI,
69};
70
Felix Fietkau0e668cd2010-04-19 19:57:32 +020071static int ath_max_4ms_framelen[4][32] = {
72 [MCS_HT20] = {
73 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
74 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
75 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
76 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
77 },
78 [MCS_HT20_SGI] = {
79 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
80 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
81 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
82 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010083 },
84 [MCS_HT40] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020085 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
86 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
87 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
88 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010089 },
90 [MCS_HT40_SGI] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020091 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
92 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
93 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
94 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010095 }
96};
97
Sujithe8324352009-01-16 21:38:42 +053098/*********************/
99/* Aggregation logic */
100/*********************/
101
Sujithe8324352009-01-16 21:38:42 +0530102static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
103{
104 struct ath_atx_ac *ac = tid->ac;
105
106 if (tid->paused)
107 return;
108
109 if (tid->sched)
110 return;
111
112 tid->sched = true;
113 list_add_tail(&tid->list, &ac->tid_q);
114
115 if (ac->sched)
116 return;
117
118 ac->sched = true;
119 list_add_tail(&ac->list, &txq->axq_acq);
120}
121
Sujithe8324352009-01-16 21:38:42 +0530122static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
123{
Felix Fietkau066dae92010-11-07 14:59:39 +0100124 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530125
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200126 WARN_ON(!tid->paused);
127
Sujithe8324352009-01-16 21:38:42 +0530128 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200129 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530130
131 if (list_empty(&tid->buf_q))
132 goto unlock;
133
134 ath_tx_queue_tid(txq, tid);
135 ath_txq_schedule(sc, txq);
136unlock:
137 spin_unlock_bh(&txq->axq_lock);
138}
139
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100140static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100141{
142 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100143 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
144 sizeof(tx_info->rate_driver_data));
145 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100146}
147
Sujithe8324352009-01-16 21:38:42 +0530148static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
149{
Felix Fietkau066dae92010-11-07 14:59:39 +0100150 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530151 struct ath_buf *bf;
152 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200153 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100154 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200155
Sujithe8324352009-01-16 21:38:42 +0530156 INIT_LIST_HEAD(&bf_head);
157
Felix Fietkau90fa5392010-09-20 13:45:38 +0200158 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530159 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530160
161 while (!list_empty(&tid->buf_q)) {
162 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530163 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200164
Felix Fietkaue1566d12010-11-20 03:08:46 +0100165 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100166 fi = get_frame_info(bf->bf_mpdu);
167 if (fi->retries) {
168 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
170 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200172 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100173 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530174 }
175
176 spin_unlock_bh(&txq->axq_lock);
177}
178
179static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
180 int seqno)
181{
182 int index, cindex;
183
184 index = ATH_BA_INDEX(tid->seq_start, seqno);
185 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
186
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200187 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530188
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200189 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530190 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
191 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
192 }
193}
194
195static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100196 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530197{
198 int index, cindex;
199
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100200 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530201 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200202 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530203
204 if (index >= ((tid->baw_tail - tid->baw_head) &
205 (ATH_TID_MAX_BUFS - 1))) {
206 tid->baw_tail = cindex;
207 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
208 }
209}
210
211/*
212 * TODO: For frame(s) that are in the retry state, we will reuse the
213 * sequence number(s) without setting the retry bit. The
214 * alternative is to give up on these and BAR the receiver's window
215 * forward.
216 */
217static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
218 struct ath_atx_tid *tid)
219
220{
221 struct ath_buf *bf;
222 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700223 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100224 struct ath_frame_info *fi;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700225
226 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530227 INIT_LIST_HEAD(&bf_head);
228
229 for (;;) {
230 if (list_empty(&tid->buf_q))
231 break;
Sujithe8324352009-01-16 21:38:42 +0530232
Sujithd43f30152009-01-16 21:38:53 +0530233 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
234 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530235
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100236 fi = get_frame_info(bf->bf_mpdu);
237 if (fi->retries)
238 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530239
240 spin_unlock(&txq->axq_lock);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530242 spin_lock(&txq->axq_lock);
243 }
244
245 tid->seq_next = tid->seq_start;
246 tid->baw_tail = tid->baw_head;
247}
248
Sujithfec247c2009-07-27 12:08:16 +0530249static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100250 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +0530251{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100252 struct ath_frame_info *fi = get_frame_info(skb);
Sujithe8324352009-01-16 21:38:42 +0530253 struct ieee80211_hdr *hdr;
254
Sujithfec247c2009-07-27 12:08:16 +0530255 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100256 if (fi->retries++ > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100257 return;
Sujithe8324352009-01-16 21:38:42 +0530258
Sujithe8324352009-01-16 21:38:42 +0530259 hdr = (struct ieee80211_hdr *)skb->data;
260 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
261}
262
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200263static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
264{
265 struct ath_buf *bf = NULL;
266
267 spin_lock_bh(&sc->tx.txbuflock);
268
269 if (unlikely(list_empty(&sc->tx.txbuf))) {
270 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL;
272 }
273
274 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
275 list_del(&bf->list);
276
277 spin_unlock_bh(&sc->tx.txbuflock);
278
279 return bf;
280}
281
282static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
283{
284 spin_lock_bh(&sc->tx.txbuflock);
285 list_add_tail(&bf->list, &sc->tx.txbuf);
286 spin_unlock_bh(&sc->tx.txbuflock);
287}
288
Sujithd43f30152009-01-16 21:38:53 +0530289static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
290{
291 struct ath_buf *tbf;
292
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200293 tbf = ath_tx_get_buffer(sc);
294 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530295 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530296
297 ATH_TXBUF_RESET(tbf);
298
299 tbf->bf_mpdu = bf->bf_mpdu;
300 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530302 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530303
304 return tbf;
305}
306
Felix Fietkaub572d032010-11-14 15:20:07 +0100307static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
308 struct ath_tx_status *ts, int txok,
309 int *nframes, int *nbad)
310{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100311 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100312 u16 seq_st = 0;
313 u32 ba[WME_BA_BMP_SIZE >> 5];
314 int ba_index;
315 int isaggr = 0;
316
317 *nbad = 0;
318 *nframes = 0;
319
Felix Fietkaub572d032010-11-14 15:20:07 +0100320 isaggr = bf_isaggr(bf);
321 if (isaggr) {
322 seq_st = ts->ts_seqnum;
323 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
324 }
325
326 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100327 fi = get_frame_info(bf->bf_mpdu);
328 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100329
330 (*nframes)++;
331 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
332 (*nbad)++;
333
334 bf = bf->bf_next;
335 }
336}
337
338
Sujithd43f30152009-01-16 21:38:53 +0530339static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
340 struct ath_buf *bf, struct list_head *bf_q,
Felix Fietkauc5992612010-11-14 15:20:09 +0100341 struct ath_tx_status *ts, int txok, bool retry)
Sujithe8324352009-01-16 21:38:42 +0530342{
343 struct ath_node *an = NULL;
344 struct sk_buff *skb;
Sujith1286ec62009-01-27 13:30:37 +0530345 struct ieee80211_sta *sta;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100346 struct ieee80211_hw *hw = sc->hw;
Sujith1286ec62009-01-27 13:30:37 +0530347 struct ieee80211_hdr *hdr;
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800348 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +0530349 struct ath_atx_tid *tid = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530350 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +0530351 struct list_head bf_head, bf_pending;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530352 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
Sujithe8324352009-01-16 21:38:42 +0530353 u32 ba[WME_BA_BMP_SIZE >> 5];
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530354 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
355 bool rc_update = true;
Felix Fietkau78c46532010-06-25 01:26:16 +0200356 struct ieee80211_tx_rate rates[4];
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100357 struct ath_frame_info *fi;
Björn Smedmanebd02282010-10-10 22:44:39 +0200358 int nframes;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100359 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +0530360
Sujitha22be222009-03-30 15:28:36 +0530361 skb = bf->bf_mpdu;
Sujith1286ec62009-01-27 13:30:37 +0530362 hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +0530363
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800364 tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800365
Felix Fietkau78c46532010-06-25 01:26:16 +0200366 memcpy(rates, tx_info->control.rates, sizeof(rates));
367
Sujith1286ec62009-01-27 13:30:37 +0530368 rcu_read_lock();
369
Ben Greear686b9cb2010-09-23 09:44:36 -0700370 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
Sujith1286ec62009-01-27 13:30:37 +0530371 if (!sta) {
372 rcu_read_unlock();
Felix Fietkau73e19462010-07-07 19:42:09 +0200373
Felix Fietkau31e79a52010-07-12 23:16:34 +0200374 INIT_LIST_HEAD(&bf_head);
375 while (bf) {
376 bf_next = bf->bf_next;
377
378 bf->bf_state.bf_type |= BUF_XRETRY;
379 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
380 !bf->bf_stale || bf_next != NULL)
381 list_move_tail(&bf->list, &bf_head);
382
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100383 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
Felix Fietkau31e79a52010-07-12 23:16:34 +0200384 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
385 0, 0);
386
387 bf = bf_next;
388 }
Sujith1286ec62009-01-27 13:30:37 +0530389 return;
Sujithe8324352009-01-16 21:38:42 +0530390 }
391
Sujith1286ec62009-01-27 13:30:37 +0530392 an = (struct ath_node *)sta->drv_priv;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100393 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
394 tid = ATH_AN_2_TID(an, tidno);
Sujith1286ec62009-01-27 13:30:37 +0530395
Felix Fietkaub11b1602010-07-11 12:48:44 +0200396 /*
397 * The hardware occasionally sends a tx status for the wrong TID.
398 * In this case, the BA status cannot be considered valid and all
399 * subframes need to be retransmitted
400 */
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100401 if (tidno != ts->tid)
Felix Fietkaub11b1602010-07-11 12:48:44 +0200402 txok = false;
403
Sujithe8324352009-01-16 21:38:42 +0530404 isaggr = bf_isaggr(bf);
Sujithd43f30152009-01-16 21:38:53 +0530405 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530406
Sujithd43f30152009-01-16 21:38:53 +0530407 if (isaggr && txok) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700408 if (ts->ts_flags & ATH9K_TX_BA) {
409 seq_st = ts->ts_seqnum;
410 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530411 } else {
Sujithd43f30152009-01-16 21:38:53 +0530412 /*
413 * AR5416 can become deaf/mute when BA
414 * issue happens. Chip needs to be reset.
415 * But AP code may have sychronization issues
416 * when perform internal reset in this routine.
417 * Only enable reset in STA mode for now.
418 */
Sujith2660b812009-02-09 13:27:26 +0530419 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
Sujithd43f30152009-01-16 21:38:53 +0530420 needreset = 1;
Sujithe8324352009-01-16 21:38:42 +0530421 }
422 }
423
424 INIT_LIST_HEAD(&bf_pending);
425 INIT_LIST_HEAD(&bf_head);
426
Felix Fietkaub572d032010-11-14 15:20:07 +0100427 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
Sujithe8324352009-01-16 21:38:42 +0530428 while (bf) {
Felix Fietkauf0b82202011-01-15 14:30:15 +0100429 txfail = txpending = sendbar = 0;
Sujithe8324352009-01-16 21:38:42 +0530430 bf_next = bf->bf_next;
431
Felix Fietkau78c46532010-06-25 01:26:16 +0200432 skb = bf->bf_mpdu;
433 tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100434 fi = get_frame_info(skb);
Felix Fietkau78c46532010-06-25 01:26:16 +0200435
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100436 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
Sujithe8324352009-01-16 21:38:42 +0530437 /* transmit completion, subframe is
438 * acked by block ack */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530439 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530440 } else if (!isaggr && txok) {
441 /* transmit completion */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530442 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530443 } else {
Felix Fietkauc5992612010-11-14 15:20:09 +0100444 if (!(tid->state & AGGR_CLEANUP) && retry) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100445 if (fi->retries < ATH_MAX_SW_RETRIES) {
446 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530447 txpending = 1;
448 } else {
449 bf->bf_state.bf_type |= BUF_XRETRY;
450 txfail = 1;
451 sendbar = 1;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530452 txfail_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530453 }
454 } else {
455 /*
456 * cleanup in progress, just fail
457 * the un-acked sub-frames
458 */
459 txfail = 1;
460 }
461 }
462
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400463 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
464 bf_next == NULL) {
Vasanthakumar Thiagarajancbfe89c2009-06-24 18:58:47 +0530465 /*
466 * Make sure the last desc is reclaimed if it
467 * not a holding desc.
468 */
469 if (!bf_last->bf_stale)
470 list_move_tail(&bf->list, &bf_head);
471 else
472 INIT_LIST_HEAD(&bf_head);
Sujithe8324352009-01-16 21:38:42 +0530473 } else {
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700474 BUG_ON(list_empty(bf_q));
Sujithd43f30152009-01-16 21:38:53 +0530475 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530476 }
477
Felix Fietkau90fa5392010-09-20 13:45:38 +0200478 if (!txpending || (tid->state & AGGR_CLEANUP)) {
Sujithe8324352009-01-16 21:38:42 +0530479 /*
480 * complete the acked-ones/xretried ones; update
481 * block-ack window
482 */
483 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100484 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530485 spin_unlock_bh(&txq->axq_lock);
486
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530487 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
Felix Fietkau78c46532010-06-25 01:26:16 +0200488 memcpy(tx_info->control.rates, rates, sizeof(rates));
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530490 rc_update = false;
491 } else {
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530493 }
494
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700495 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
496 !txfail, sendbar);
Sujithe8324352009-01-16 21:38:42 +0530497 } else {
Sujithd43f30152009-01-16 21:38:53 +0530498 /* retry the un-acked ones */
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
500 if (bf->bf_next == NULL && bf_last->bf_stale) {
501 struct ath_buf *tbf;
Sujithe8324352009-01-16 21:38:42 +0530502
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400503 tbf = ath_clone_txbuf(sc, bf_last);
504 /*
505 * Update tx baw and complete the
506 * frame with failed status if we
507 * run out of tx buf.
508 */
509 if (!tbf) {
510 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100511 ath_tx_update_baw(sc, tid, fi->seqno);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400512 spin_unlock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400513
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400514 bf->bf_state.bf_type |=
515 BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100516 ath_tx_rc_status(sc, bf, ts, nframes,
Felix Fietkaub572d032010-11-14 15:20:07 +0100517 nbad, 0, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400518 ath_tx_complete_buf(sc, bf, txq,
519 &bf_head,
520 ts, 0, 0);
521 break;
522 }
523
524 ath9k_hw_cleartxdesc(sc->sc_ah,
525 tbf->bf_desc);
526 list_add_tail(&tbf->list, &bf_head);
527 } else {
528 /*
529 * Clear descriptor status words for
530 * software retry
531 */
532 ath9k_hw_cleartxdesc(sc->sc_ah,
533 bf->bf_desc);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400534 }
Sujithe8324352009-01-16 21:38:42 +0530535 }
536
537 /*
538 * Put this buffer to the temporary pending
539 * queue to retain ordering
540 */
541 list_splice_tail_init(&bf_head, &bf_pending);
542 }
543
544 bf = bf_next;
545 }
546
Felix Fietkau4cee7862010-07-23 03:53:16 +0200547 /* prepend un-acked frames to the beginning of the pending frame queue */
548 if (!list_empty(&bf_pending)) {
549 spin_lock_bh(&txq->axq_lock);
550 list_splice(&bf_pending, &tid->buf_q);
551 ath_tx_queue_tid(txq, tid);
552 spin_unlock_bh(&txq->axq_lock);
553 }
554
Sujithe8324352009-01-16 21:38:42 +0530555 if (tid->state & AGGR_CLEANUP) {
Felix Fietkau90fa5392010-09-20 13:45:38 +0200556 ath_tx_flush_tid(sc, tid);
557
Sujithe8324352009-01-16 21:38:42 +0530558 if (tid->baw_head == tid->baw_tail) {
559 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530560 tid->state &= ~AGGR_CLEANUP;
Sujithd43f30152009-01-16 21:38:53 +0530561 }
Sujithe8324352009-01-16 21:38:42 +0530562 }
563
Sujith1286ec62009-01-27 13:30:37 +0530564 rcu_read_unlock();
565
Sujithe8324352009-01-16 21:38:42 +0530566 if (needreset)
567 ath_reset(sc, false);
Sujithe8324352009-01-16 21:38:42 +0530568}
569
570static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
571 struct ath_atx_tid *tid)
572{
Sujithe8324352009-01-16 21:38:42 +0530573 struct sk_buff *skb;
574 struct ieee80211_tx_info *tx_info;
575 struct ieee80211_tx_rate *rates;
Sujithd43f30152009-01-16 21:38:53 +0530576 u32 max_4ms_framelen, frmlen;
Sujith4ef70842009-07-23 15:32:41 +0530577 u16 aggr_limit, legacy = 0;
Sujithe8324352009-01-16 21:38:42 +0530578 int i;
579
Sujitha22be222009-03-30 15:28:36 +0530580 skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +0530581 tx_info = IEEE80211_SKB_CB(skb);
582 rates = tx_info->control.rates;
Sujithe8324352009-01-16 21:38:42 +0530583
584 /*
585 * Find the lowest frame length among the rate series that will have a
586 * 4ms transmit duration.
587 * TODO - TXOP limit needs to be considered.
588 */
589 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
590
591 for (i = 0; i < 4; i++) {
592 if (rates[i].count) {
Felix Fietkau545750d2009-11-23 22:21:01 +0100593 int modeidx;
594 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
Sujithe8324352009-01-16 21:38:42 +0530595 legacy = 1;
596 break;
597 }
598
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200599 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau545750d2009-11-23 22:21:01 +0100600 modeidx = MCS_HT40;
601 else
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200602 modeidx = MCS_HT20;
603
604 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
605 modeidx++;
Felix Fietkau545750d2009-11-23 22:21:01 +0100606
607 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
Sujithd43f30152009-01-16 21:38:53 +0530608 max_4ms_framelen = min(max_4ms_framelen, frmlen);
Sujithe8324352009-01-16 21:38:42 +0530609 }
610 }
611
612 /*
613 * limit aggregate size by the minimum rate if rate selected is
614 * not a probe rate, if rate selected is a probe rate then
615 * avoid aggregation of this packet.
616 */
617 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
618 return 0;
619
Vasanthakumar Thiagarajan17739122009-08-26 21:08:50 +0530620 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
621 aggr_limit = min((max_4ms_framelen * 3) / 8,
622 (u32)ATH_AMPDU_LIMIT_MAX);
623 else
624 aggr_limit = min(max_4ms_framelen,
625 (u32)ATH_AMPDU_LIMIT_MAX);
Sujithe8324352009-01-16 21:38:42 +0530626
627 /*
628 * h/w can accept aggregates upto 16 bit lengths (65535).
629 * The IE, however can hold upto 65536, which shows up here
630 * as zero. Ignore 65536 since we are constrained by hw.
631 */
Sujith4ef70842009-07-23 15:32:41 +0530632 if (tid->an->maxampdu)
633 aggr_limit = min(aggr_limit, tid->an->maxampdu);
Sujithe8324352009-01-16 21:38:42 +0530634
635 return aggr_limit;
636}
637
638/*
Sujithd43f30152009-01-16 21:38:53 +0530639 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530640 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530641 */
642static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
643 struct ath_buf *bf, u16 frmlen)
644{
Sujithe8324352009-01-16 21:38:42 +0530645 struct sk_buff *skb = bf->bf_mpdu;
646 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530647 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530648 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100649 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200650 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100651 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530652
653 /* Select standard number of delimiters based on frame length alone */
654 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
655
656 /*
657 * If encryption enabled, hardware requires some more padding between
658 * subframes.
659 * TODO - this could be improved to be dependent on the rate.
660 * The hardware can keep up at lower rates, but not higher rates
661 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100662 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
Sujithe8324352009-01-16 21:38:42 +0530663 ndelim += ATH_AGGR_ENCRYPTDELIM;
664
665 /*
666 * Convert desired mpdu density from microeconds to bytes based
667 * on highest rate in rate series (i.e. first rate) to determine
668 * required minimum length for subframe. Take into account
669 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530670 *
Sujithe8324352009-01-16 21:38:42 +0530671 * If there is no mpdu density restriction, no further calculation
672 * is needed.
673 */
Sujith4ef70842009-07-23 15:32:41 +0530674
675 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530676 return ndelim;
677
678 rix = tx_info->control.rates[0].idx;
679 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530680 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
681 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
682
683 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530684 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530685 else
Sujith4ef70842009-07-23 15:32:41 +0530686 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530687
688 if (nsymbols == 0)
689 nsymbols = 1;
690
Felix Fietkauc6663872010-04-19 19:57:33 +0200691 streams = HT_RC_2_STREAMS(rix);
692 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530693 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
694
Sujithe8324352009-01-16 21:38:42 +0530695 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530696 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
697 ndelim = max(mindelim, ndelim);
698 }
699
700 return ndelim;
701}
702
703static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530704 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530705 struct ath_atx_tid *tid,
Felix Fietkau269c44b2010-11-14 15:20:06 +0100706 struct list_head *bf_q,
707 int *aggr_len)
Sujithe8324352009-01-16 21:38:42 +0530708{
709#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Sujithd43f30152009-01-16 21:38:53 +0530710 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
711 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530712 u16 aggr_limit = 0, al = 0, bpad = 0,
713 al_delta, h_baw = tid->baw_size / 2;
714 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Felix Fietkau0299a502010-10-21 02:47:24 +0200715 struct ieee80211_tx_info *tx_info;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100716 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530717
718 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
719
720 do {
721 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100722 fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530723
Sujithd43f30152009-01-16 21:38:53 +0530724 /* do not step over block-ack window */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100725 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
Sujithe8324352009-01-16 21:38:42 +0530726 status = ATH_AGGR_BAW_CLOSED;
727 break;
728 }
729
730 if (!rl) {
731 aggr_limit = ath_lookup_rate(sc, bf, tid);
732 rl = 1;
733 }
734
Sujithd43f30152009-01-16 21:38:53 +0530735 /* do not exceed aggregation limit */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100736 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
Sujithe8324352009-01-16 21:38:42 +0530737
Sujithd43f30152009-01-16 21:38:53 +0530738 if (nframes &&
739 (aggr_limit < (al + bpad + al_delta + prev_al))) {
Sujithe8324352009-01-16 21:38:42 +0530740 status = ATH_AGGR_LIMITED;
741 break;
742 }
743
Felix Fietkau0299a502010-10-21 02:47:24 +0200744 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
745 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
746 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
747 break;
748
Sujithd43f30152009-01-16 21:38:53 +0530749 /* do not exceed subframe limit */
750 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530751 status = ATH_AGGR_LIMITED;
752 break;
753 }
Sujithd43f30152009-01-16 21:38:53 +0530754 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530755
Sujithd43f30152009-01-16 21:38:53 +0530756 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530757 al += bpad + al_delta;
758
759 /*
760 * Get the delimiters needed to meet the MPDU
761 * density for this node.
762 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100763 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530764 bpad = PADBYTES(al_delta) + (ndelim << 2);
765
766 bf->bf_next = NULL;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400767 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
Sujithe8324352009-01-16 21:38:42 +0530768
Sujithd43f30152009-01-16 21:38:53 +0530769 /* link buffers of this frame to the aggregate */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100770 if (!fi->retries)
771 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithd43f30152009-01-16 21:38:53 +0530772 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
773 list_move_tail(&bf->list, bf_q);
Sujithe8324352009-01-16 21:38:42 +0530774 if (bf_prev) {
775 bf_prev->bf_next = bf;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400776 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
777 bf->bf_daddr);
Sujithe8324352009-01-16 21:38:42 +0530778 }
779 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530780
Sujithe8324352009-01-16 21:38:42 +0530781 } while (!list_empty(&tid->buf_q));
782
Felix Fietkau269c44b2010-11-14 15:20:06 +0100783 *aggr_len = al;
Sujithd43f30152009-01-16 21:38:53 +0530784
Sujithe8324352009-01-16 21:38:42 +0530785 return status;
786#undef PADBYTES
787}
788
789static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
790 struct ath_atx_tid *tid)
791{
Sujithd43f30152009-01-16 21:38:53 +0530792 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +0530793 enum ATH_AGGR_STATUS status;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100794 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530795 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100796 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +0530797
798 do {
799 if (list_empty(&tid->buf_q))
800 return;
801
802 INIT_LIST_HEAD(&bf_q);
803
Felix Fietkau269c44b2010-11-14 15:20:06 +0100804 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530805
806 /*
Sujithd43f30152009-01-16 21:38:53 +0530807 * no frames picked up to be aggregated;
808 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +0530809 */
810 if (list_empty(&bf_q))
811 break;
812
813 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530814 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +0530815
Sujithd43f30152009-01-16 21:38:53 +0530816 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +0100817 if (bf == bf->bf_lastbf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100818 fi = get_frame_info(bf->bf_mpdu);
819
Sujithe8324352009-01-16 21:38:42 +0530820 bf->bf_state.bf_type &= ~BUF_AGGR;
Sujithd43f30152009-01-16 21:38:53 +0530821 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100822 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530823 ath_tx_txqaddbuf(sc, txq, &bf_q);
824 continue;
825 }
826
Sujithd43f30152009-01-16 21:38:53 +0530827 /* setup first desc of aggregate */
Sujithe8324352009-01-16 21:38:42 +0530828 bf->bf_state.bf_type |= BUF_AGGR;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100829 ath_buf_set_rate(sc, bf, aggr_len);
830 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530831
Sujithd43f30152009-01-16 21:38:53 +0530832 /* anchor last desc of aggregate */
833 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530834
Sujithe8324352009-01-16 21:38:42 +0530835 ath_tx_txqaddbuf(sc, txq, &bf_q);
Sujithfec247c2009-07-27 12:08:16 +0530836 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +0530837
Felix Fietkau4b3ba662010-12-17 00:57:00 +0100838 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +0530839 status != ATH_AGGR_BAW_CLOSED);
840}
841
Felix Fietkau231c3a12010-09-20 19:35:28 +0200842int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
843 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530844{
845 struct ath_atx_tid *txtid;
846 struct ath_node *an;
847
848 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530849 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200850
851 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
852 return -EAGAIN;
853
Sujithf83da962009-07-23 15:32:37 +0530854 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200855 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -0700856 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200857
Felix Fietkau2ed72222011-01-10 17:05:49 -0700858 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
859 txtid->baw_head = txtid->baw_tail = 0;
860
Felix Fietkau231c3a12010-09-20 19:35:28 +0200861 return 0;
Sujithe8324352009-01-16 21:38:42 +0530862}
863
Sujithf83da962009-07-23 15:32:37 +0530864void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +0530865{
866 struct ath_node *an = (struct ath_node *)sta->drv_priv;
867 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +0100868 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530869
870 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +0530871 return;
Sujithe8324352009-01-16 21:38:42 +0530872
873 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +0530874 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +0530875 return;
Sujithe8324352009-01-16 21:38:42 +0530876 }
877
Sujithe8324352009-01-16 21:38:42 +0530878 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200879 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200880
881 /*
882 * If frames are still being transmitted for this TID, they will be
883 * cleaned up during tx completion. To prevent race conditions, this
884 * TID can only be reused after all in-progress subframes have been
885 * completed.
886 */
887 if (txtid->baw_head != txtid->baw_tail)
888 txtid->state |= AGGR_CLEANUP;
889 else
890 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +0530891 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530892
Felix Fietkau90fa5392010-09-20 13:45:38 +0200893 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +0530894}
895
896void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
897{
898 struct ath_atx_tid *txtid;
899 struct ath_node *an;
900
901 an = (struct ath_node *)sta->drv_priv;
902
903 if (sc->sc_flags & SC_OP_TXAGGR) {
904 txtid = ATH_AN_2_TID(an, tid);
905 txtid->baw_size =
906 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
907 txtid->state |= AGGR_ADDBA_COMPLETE;
908 txtid->state &= ~AGGR_ADDBA_PROGRESS;
909 ath_tx_resume_tid(sc, txtid);
910 }
911}
912
Sujithe8324352009-01-16 21:38:42 +0530913/********************/
914/* Queue Management */
915/********************/
916
Sujithe8324352009-01-16 21:38:42 +0530917static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
918 struct ath_txq *txq)
919{
920 struct ath_atx_ac *ac, *ac_tmp;
921 struct ath_atx_tid *tid, *tid_tmp;
922
923 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
924 list_del(&ac->list);
925 ac->sched = false;
926 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
927 list_del(&tid->list);
928 tid->sched = false;
929 ath_tid_drain(sc, txq, tid);
930 }
931 }
932}
933
934struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
935{
Sujithcbe61d82009-02-09 13:27:12 +0530936 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -0700937 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +0530938 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +0100939 static const int subtype_txq_to_hwq[] = {
940 [WME_AC_BE] = ATH_TXQ_AC_BE,
941 [WME_AC_BK] = ATH_TXQ_AC_BK,
942 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 };
Ben Greear60f2d1d2011-01-09 23:11:52 -0800945 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +0530946
947 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +0100948 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +0530949 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
950 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
951 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
952 qi.tqi_physCompBuf = 0;
953
954 /*
955 * Enable interrupts only for EOL and DESC conditions.
956 * We mark tx descriptors to receive a DESC interrupt
957 * when a tx queue gets deep; otherwise waiting for the
958 * EOL to reap descriptors. Note that this is done to
959 * reduce interrupt load and this only defers reaping
960 * descriptors, never transmitting frames. Aside from
961 * reducing interrupts this also permits more concurrency.
962 * The only potential downside is if the tx queue backs
963 * up in which case the top half of the kernel may backup
964 * due to a lack of tx descriptors.
965 *
966 * The UAPSD queue is an exception, since we take a desc-
967 * based intr on the EOSP frames.
968 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -0400969 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
970 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
971 TXQ_FLAG_TXERRINT_ENABLE;
972 } else {
973 if (qtype == ATH9K_TX_QUEUE_UAPSD)
974 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
975 else
976 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
977 TXQ_FLAG_TXDESCINT_ENABLE;
978 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800979 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
980 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +0530981 /*
982 * NB: don't print a message, this happens
983 * normally on parts with too few tx queues
984 */
985 return NULL;
986 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800987 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -0800988 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -0800989 axq_qnum, ARRAY_SIZE(sc->tx.txq));
990 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +0530991 return NULL;
992 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800993 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
994 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +0530995
Ben Greear60f2d1d2011-01-09 23:11:52 -0800996 txq->axq_qnum = axq_qnum;
997 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +0530998 txq->axq_link = NULL;
999 INIT_LIST_HEAD(&txq->axq_q);
1000 INIT_LIST_HEAD(&txq->axq_acq);
1001 spin_lock_init(&txq->axq_lock);
1002 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001003 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001004 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001005 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001006
1007 txq->txq_headidx = txq->txq_tailidx = 0;
1008 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1009 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1010 INIT_LIST_HEAD(&txq->txq_fifo_pending);
Sujithe8324352009-01-16 21:38:42 +05301011 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001012 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301013}
1014
Sujithe8324352009-01-16 21:38:42 +05301015int ath_txq_update(struct ath_softc *sc, int qnum,
1016 struct ath9k_tx_queue_info *qinfo)
1017{
Sujithcbe61d82009-02-09 13:27:12 +05301018 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301019 int error = 0;
1020 struct ath9k_tx_queue_info qi;
1021
1022 if (qnum == sc->beacon.beaconq) {
1023 /*
1024 * XXX: for beacon queue, we just save the parameter.
1025 * It will be picked up by ath_beaconq_config when
1026 * it's necessary.
1027 */
1028 sc->beacon.beacon_qi = *qinfo;
1029 return 0;
1030 }
1031
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001032 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301033
1034 ath9k_hw_get_txq_props(ah, qnum, &qi);
1035 qi.tqi_aifs = qinfo->tqi_aifs;
1036 qi.tqi_cwmin = qinfo->tqi_cwmin;
1037 qi.tqi_cwmax = qinfo->tqi_cwmax;
1038 qi.tqi_burstTime = qinfo->tqi_burstTime;
1039 qi.tqi_readyTime = qinfo->tqi_readyTime;
1040
1041 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001042 ath_err(ath9k_hw_common(sc->sc_ah),
1043 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301044 error = -EIO;
1045 } else {
1046 ath9k_hw_resettxqueue(ah, qnum);
1047 }
1048
1049 return error;
1050}
1051
1052int ath_cabq_update(struct ath_softc *sc)
1053{
1054 struct ath9k_tx_queue_info qi;
1055 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301056
1057 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1058 /*
1059 * Ensure the readytime % is within the bounds.
1060 */
Sujith17d79042009-02-09 13:27:03 +05301061 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1062 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1063 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1064 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301065
Johannes Berg57c4d7b2009-04-23 16:10:04 +02001066 qi.tqi_readyTime = (sc->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301067 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301068 ath_txq_update(sc, qnum, &qi);
1069
1070 return 0;
1071}
1072
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001073static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1074{
1075 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1076 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1077}
1078
Sujith043a0402009-01-16 21:38:47 +05301079/*
1080 * Drain a given TX queue (could be Beacon or Data)
1081 *
1082 * This assumes output has been stopped and
1083 * we do not need to block ath_tx_tasklet.
1084 */
1085void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301086{
1087 struct ath_buf *bf, *lastbf;
1088 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001089 struct ath_tx_status ts;
1090
1091 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301092 INIT_LIST_HEAD(&bf_head);
1093
Sujithe8324352009-01-16 21:38:42 +05301094 for (;;) {
1095 spin_lock_bh(&txq->axq_lock);
1096
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001097 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1098 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1099 txq->txq_headidx = txq->txq_tailidx = 0;
1100 spin_unlock_bh(&txq->axq_lock);
1101 break;
1102 } else {
1103 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1104 struct ath_buf, list);
1105 }
1106 } else {
1107 if (list_empty(&txq->axq_q)) {
1108 txq->axq_link = NULL;
1109 spin_unlock_bh(&txq->axq_lock);
1110 break;
1111 }
1112 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1113 list);
Sujithe8324352009-01-16 21:38:42 +05301114
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001115 if (bf->bf_stale) {
1116 list_del(&bf->list);
1117 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301118
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001119 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001120 continue;
1121 }
Sujithe8324352009-01-16 21:38:42 +05301122 }
1123
1124 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301125
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001126 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1127 list_cut_position(&bf_head,
1128 &txq->txq_fifo[txq->txq_tailidx],
1129 &lastbf->list);
1130 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1131 } else {
1132 /* remove ath_buf's of the same mpdu from txq */
1133 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1134 }
1135
Sujithe8324352009-01-16 21:38:42 +05301136 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001137 if (bf_is_ampdu_not_probing(bf))
1138 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301139 spin_unlock_bh(&txq->axq_lock);
1140
1141 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001142 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1143 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301144 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001145 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301146 }
1147
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001148 spin_lock_bh(&txq->axq_lock);
1149 txq->axq_tx_inprogress = false;
1150 spin_unlock_bh(&txq->axq_lock);
1151
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001152 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1153 spin_lock_bh(&txq->axq_lock);
1154 while (!list_empty(&txq->txq_fifo_pending)) {
1155 bf = list_first_entry(&txq->txq_fifo_pending,
1156 struct ath_buf, list);
1157 list_cut_position(&bf_head,
1158 &txq->txq_fifo_pending,
1159 &bf->bf_lastbf->list);
1160 spin_unlock_bh(&txq->axq_lock);
1161
1162 if (bf_isampdu(bf))
1163 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001164 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001165 else
1166 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1167 &ts, 0, 0);
1168 spin_lock_bh(&txq->axq_lock);
1169 }
1170 spin_unlock_bh(&txq->axq_lock);
1171 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001172
1173 /* flush any pending frames if aggregation is enabled */
1174 if (sc->sc_flags & SC_OP_TXAGGR) {
1175 if (!retry_tx) {
1176 spin_lock_bh(&txq->axq_lock);
1177 ath_txq_drain_pending_buffers(sc, txq);
1178 spin_unlock_bh(&txq->axq_lock);
1179 }
1180 }
Sujithe8324352009-01-16 21:38:42 +05301181}
1182
Felix Fietkau080e1a22010-12-05 20:17:53 +01001183bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301184{
Sujithcbe61d82009-02-09 13:27:12 +05301185 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001186 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301187 struct ath_txq *txq;
1188 int i, npend = 0;
1189
1190 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001191 return true;
Sujith043a0402009-01-16 21:38:47 +05301192
1193 /* Stop beacon queue */
1194 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1195
1196 /* Stop data queues */
1197 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1198 if (ATH_TXQ_SETUP(sc, i)) {
1199 txq = &sc->tx.txq[i];
1200 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1201 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1202 }
1203 }
1204
Felix Fietkau080e1a22010-12-05 20:17:53 +01001205 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001206 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301207
1208 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001209 if (!ATH_TXQ_SETUP(sc, i))
1210 continue;
1211
1212 /*
1213 * The caller will resume queues with ieee80211_wake_queues.
1214 * Mark the queue as not stopped to prevent ath_tx_complete
1215 * from waking the queue too early.
1216 */
1217 txq = &sc->tx.txq[i];
1218 txq->stopped = false;
1219 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301220 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001221
1222 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301223}
1224
Sujithe8324352009-01-16 21:38:42 +05301225void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1226{
1227 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1228 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1229}
1230
Ben Greear7755bad2011-01-18 17:30:00 -08001231/* For each axq_acq entry, for each tid, try to schedule packets
1232 * for transmit until ampdu_depth has reached min Q depth.
1233 */
Sujithe8324352009-01-16 21:38:42 +05301234void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1235{
Ben Greear7755bad2011-01-18 17:30:00 -08001236 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1237 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301238
Felix Fietkau21f28e62011-01-15 14:30:14 +01001239 if (list_empty(&txq->axq_acq) ||
1240 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301241 return;
1242
1243 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001244 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301245
Ben Greear7755bad2011-01-18 17:30:00 -08001246 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1247 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1248 list_del(&ac->list);
1249 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301250
Ben Greear7755bad2011-01-18 17:30:00 -08001251 while (!list_empty(&ac->tid_q)) {
1252 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1253 list);
1254 list_del(&tid->list);
1255 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301256
Ben Greear7755bad2011-01-18 17:30:00 -08001257 if (tid->paused)
1258 continue;
Sujithe8324352009-01-16 21:38:42 +05301259
Ben Greear7755bad2011-01-18 17:30:00 -08001260 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301261
Ben Greear7755bad2011-01-18 17:30:00 -08001262 /*
1263 * add tid to round-robin queue if more frames
1264 * are pending for the tid
1265 */
1266 if (!list_empty(&tid->buf_q))
1267 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301268
Ben Greear7755bad2011-01-18 17:30:00 -08001269 if (tid == last_tid ||
1270 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1271 break;
Sujithe8324352009-01-16 21:38:42 +05301272 }
Ben Greear7755bad2011-01-18 17:30:00 -08001273
1274 if (!list_empty(&ac->tid_q)) {
1275 if (!ac->sched) {
1276 ac->sched = true;
1277 list_add_tail(&ac->list, &txq->axq_acq);
1278 }
1279 }
1280
1281 if (ac == last_ac ||
1282 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1283 return;
Sujithe8324352009-01-16 21:38:42 +05301284 }
1285}
1286
Sujithe8324352009-01-16 21:38:42 +05301287/***********/
1288/* TX, DMA */
1289/***********/
1290
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001291/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001292 * Insert a chain of ath_buf (descriptors) on a txq and
1293 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 */
Sujith102e0572008-10-29 10:15:16 +05301295static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1296 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001297{
Sujithcbe61d82009-02-09 13:27:12 +05301298 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001299 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001300 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301301
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001302 /*
1303 * Insert the frame on the outbound list and
1304 * pass it on to the hardware.
1305 */
1306
1307 if (list_empty(head))
1308 return;
1309
1310 bf = list_first_entry(head, struct ath_buf, list);
1311
Joe Perches226afe62010-12-02 19:12:37 -08001312 ath_dbg(common, ATH_DBG_QUEUE,
1313 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001314
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001315 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1316 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1317 list_splice_tail_init(head, &txq->txq_fifo_pending);
1318 return;
1319 }
1320 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001321 ath_dbg(common, ATH_DBG_XMIT,
1322 "Initializing tx fifo %d which is non-empty\n",
1323 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001324 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1325 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1326 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001327 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001328 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001329 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1330 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001331 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001332 list_splice_tail_init(head, &txq->axq_q);
1333
1334 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001335 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001336 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001337 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1338 txq->axq_qnum, ito64(bf->bf_daddr),
1339 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001340 } else {
1341 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001342 ath_dbg(common, ATH_DBG_XMIT,
1343 "link[%u] (%p)=%llx (%p)\n",
1344 txq->axq_qnum, txq->axq_link,
1345 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001346 }
1347 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1348 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001349 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001350 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001351 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001352 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001353 if (bf_is_ampdu_not_probing(bf))
1354 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001355}
1356
Sujithe8324352009-01-16 21:38:42 +05301357static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001358 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301359{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001360 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001361 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301362
Sujithe8324352009-01-16 21:38:42 +05301363 bf->bf_state.bf_type |= BUF_AMPDU;
1364
1365 /*
1366 * Do not queue to h/w when any of the following conditions is true:
1367 * - there are pending frames in software queue
1368 * - the TID is currently paused for ADDBA/BAR request
1369 * - seqno is not within block-ack window
1370 * - h/w queue depth exceeds low water mark
1371 */
1372 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001373 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001374 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001375 /*
Sujithe8324352009-01-16 21:38:42 +05301376 * Add this frame to software queue for scheduling later
1377 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001378 */
Ben Greearbda8add2011-01-09 23:11:48 -08001379 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001380 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301381 ath_tx_queue_tid(txctl->txq, tid);
1382 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001383 }
1384
Felix Fietkau04caf862010-11-14 15:20:12 +01001385 INIT_LIST_HEAD(&bf_head);
1386 list_add(&bf->list, &bf_head);
1387
Sujithe8324352009-01-16 21:38:42 +05301388 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001389 if (!fi->retries)
1390 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301391
1392 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001393 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301394 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001395 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001396 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301397}
1398
Felix Fietkau82b873a2010-11-11 03:18:37 +01001399static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1400 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001401 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001402{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001403 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301404 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405
Sujithe8324352009-01-16 21:38:42 +05301406 bf = list_first_entry(bf_head, struct ath_buf, list);
1407 bf->bf_state.bf_type &= ~BUF_AMPDU;
1408
1409 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001410 if (tid)
1411 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301412
Sujithd43f30152009-01-16 21:38:53 +05301413 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001414 fi = get_frame_info(bf->bf_mpdu);
1415 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301416 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301417 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001418}
1419
Sujith528f0c62008-10-29 10:14:26 +05301420static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001421{
Sujith528f0c62008-10-29 10:14:26 +05301422 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001423 enum ath9k_pkt_type htype;
1424 __le16 fc;
1425
Sujith528f0c62008-10-29 10:14:26 +05301426 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001427 fc = hdr->frame_control;
1428
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001429 if (ieee80211_is_beacon(fc))
1430 htype = ATH9K_PKT_TYPE_BEACON;
1431 else if (ieee80211_is_probe_resp(fc))
1432 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1433 else if (ieee80211_is_atim(fc))
1434 htype = ATH9K_PKT_TYPE_ATIM;
1435 else if (ieee80211_is_pspoll(fc))
1436 htype = ATH9K_PKT_TYPE_PSPOLL;
1437 else
1438 htype = ATH9K_PKT_TYPE_NORMAL;
1439
1440 return htype;
1441}
1442
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001443static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1444 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301445{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001446 struct ath_wiphy *aphy = hw->priv;
1447 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301448 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001449 struct ieee80211_sta *sta = tx_info->control.sta;
1450 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301451 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001452 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301453 struct ath_node *an;
1454 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001455 enum ath9k_key_type keytype;
1456 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001457 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301458
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001459 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301460
Sujith528f0c62008-10-29 10:14:26 +05301461 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001462 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1463 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001465 an = (struct ath_node *) sta->drv_priv;
1466 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1467
1468 /*
1469 * Override seqno set by upper layer with the one
1470 * in tx aggregation state.
1471 */
1472 tid = ATH_AN_2_TID(an, tidno);
1473 seqno = tid->seq_next;
1474 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1475 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1476 }
1477
1478 memset(fi, 0, sizeof(*fi));
1479 if (hw_key)
1480 fi->keyix = hw_key->hw_key_idx;
1481 else
1482 fi->keyix = ATH9K_TXKEYIX_INVALID;
1483 fi->keytype = keytype;
1484 fi->framelen = framelen;
1485 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301486}
1487
Felix Fietkau82b873a2010-11-11 03:18:37 +01001488static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301489{
1490 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1491 int flags = 0;
1492
1493 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1494 flags |= ATH9K_TXDESC_INTREQ;
1495
1496 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1497 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301498
Felix Fietkau82b873a2010-11-11 03:18:37 +01001499 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001500 flags |= ATH9K_TXDESC_LDPC;
1501
Sujith528f0c62008-10-29 10:14:26 +05301502 return flags;
1503}
1504
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506 * rix - rate index
1507 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1508 * width - 0 for 20 MHz, 1 for 40 MHz
 1509 * half_gi - use 3.6 us symbol time (short GI) instead of 4 us
1510 */
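/*
 * Worked example (illustrative numbers, not taken from the driver):
 * rix = 7 (MCS7, one stream), pktlen = 1500, width = 0, half_gi = 0:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS  = 12022
 *   nsymbits = bits_per_symbol[7][0] * 1  = 260
 *   nsymbols = ceil(12022 / 260)          = 47
 *   duration = SYMBOL_TIME(47)            = 188 us
 * plus L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us of
 * training/signal overhead, for 224 us in total.
 */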
Felix Fietkau269c44b2010-11-14 15:20:06 +01001511static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301512 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001514 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001515 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301516
1517 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001518 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001519 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001520 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001521 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1522
1523 if (!half_gi)
1524 duration = SYMBOL_TIME(nsymbols);
1525 else
1526 duration = SYMBOL_TIME_HALFGI(nsymbols);
1527
Sujithe63835b2008-11-18 09:07:53 +05301528 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001529 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301530
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001531 return duration;
1532}
1533
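/*
 * Drop from three active TX chains (0x7) to two (0x3) for legacy rates
 * and one/two-stream MCS (hw rate code < 0x90) on 5 GHz channels when
 * APM is enabled (SC_OP_ENABLE_APM), presumably to stay within per-chain
 * power limits; every other case keeps the configured chainmask.
 */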
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301534u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1535{
1536 struct ath_hw *ah = sc->sc_ah;
1537 struct ath9k_channel *curchan = ah->curchan;
1538 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1539 (curchan->channelFlags & CHANNEL_5GHZ) &&
1540 (chainmask == 0x7) && (rate < 0x90))
1541 return 0x3;
1542 else
1543 return chainmask;
1544}
1545
Felix Fietkau269c44b2010-11-14 15:20:06 +01001546static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001547{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001548 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001549 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301550 struct sk_buff *skb;
1551 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301552 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001553 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301554 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301555 int i, flags = 0;
1556 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301557 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301558
1559 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301560
Sujitha22be222009-03-30 15:28:36 +05301561 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301562 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301563 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301564 hdr = (struct ieee80211_hdr *)skb->data;
1565 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301566
Sujithc89424d2009-01-30 14:29:28 +05301567 /*
 1568 * Whether short preamble is needed for the CTS rate is decided
 1569 * from the BSS's global flag, while the rate series itself uses
 1570 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE.
1571 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001572 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1573 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301574 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001575 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001576
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001578 bool is_40, is_sgi, is_sp;
1579 int phy;
1580
Sujithe63835b2008-11-18 09:07:53 +05301581 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001582 continue;
1583
Sujitha8efee42008-11-18 09:07:30 +05301584 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301585 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586
Felix Fietkau27032052010-01-17 21:08:50 +01001587 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1588 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301589 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001590 flags |= ATH9K_TXDESC_RTSENA;
1591 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1592 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1593 flags |= ATH9K_TXDESC_CTSENA;
1594 }
1595
Sujithc89424d2009-01-30 14:29:28 +05301596 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1597 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1598 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1599 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001600
Felix Fietkau545750d2009-11-23 22:21:01 +01001601 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1602 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1603 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1604
1605 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1606 /* MCS rates */
1607 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301608 series[i].ChSel = ath_txchainmask_reduction(sc,
1609 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001610 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001611 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001612 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1613 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001614 continue;
1615 }
1616
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301617 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001618 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1619 !(rate->flags & IEEE80211_RATE_ERP_G))
1620 phy = WLAN_RC_PHY_CCK;
1621 else
1622 phy = WLAN_RC_PHY_OFDM;
1623
1624 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1625 series[i].Rate = rate->hw_value;
1626 if (rate->hw_value_short) {
1627 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1628 series[i].Rate |= rate->hw_value_short;
1629 } else {
1630 is_sp = false;
1631 }
1632
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301633 if (bf->bf_state.bfs_paprd)
1634 series[i].ChSel = common->tx_chainmask;
1635 else
1636 series[i].ChSel = ath_txchainmask_reduction(sc,
1637 common->tx_chainmask, series[i].Rate);
1638
Felix Fietkau545750d2009-11-23 22:21:01 +01001639 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001640 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001641 }
1642
Felix Fietkau27032052010-01-17 21:08:50 +01001643 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001644 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001645 flags &= ~ATH9K_TXDESC_RTSENA;
1646
1647 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1648 if (flags & ATH9K_TXDESC_RTSENA)
1649 flags &= ~ATH9K_TXDESC_CTSENA;
1650
Sujithe63835b2008-11-18 09:07:53 +05301651 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301652 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1653 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301654 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301655 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301656
Sujith17d79042009-02-09 13:27:03 +05301657 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301658 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001659}
1660
Felix Fietkau82b873a2010-11-11 03:18:37 +01001661static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001662 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001663 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301664{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001665 struct ath_wiphy *aphy = hw->priv;
1666 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001667 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001668 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001669 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001670 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001671 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001672 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001673
1674 bf = ath_tx_get_buffer(sc);
1675 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001676 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001677 return NULL;
1678 }
Sujithe8324352009-01-16 21:38:42 +05301679
Sujithe8324352009-01-16 21:38:42 +05301680 ATH_TXBUF_RESET(bf);
1681
Felix Fietkau82b873a2010-11-11 03:18:37 +01001682 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301683 bf->bf_mpdu = skb;
1684
Ben Greearc1739eb32010-10-14 12:45:29 -07001685 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1686 skb->len, DMA_TO_DEVICE);
1687 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301688 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001689 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001690 ath_err(ath9k_hw_common(sc->sc_ah),
1691 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001692 ath_tx_return_buffer(sc, bf);
1693 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301694 }
1695
Sujithe8324352009-01-16 21:38:42 +05301696 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301697
1698 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001699 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301700
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001701 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1702 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301703
1704 ath9k_hw_filltxdesc(ah, ds,
1705 skb->len, /* segment length */
1706 true, /* first segment */
1707 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001708 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001709 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001710 txq->axq_qnum);
1711
1712
1713 return bf;
1714}
1715
1716/* FIXME: tx power */
1717static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1718 struct ath_tx_control *txctl)
1719{
1720 struct sk_buff *skb = bf->bf_mpdu;
1721 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1722 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001723 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001724 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301726
Sujithe8324352009-01-16 21:38:42 +05301727 spin_lock_bh(&txctl->txq->axq_lock);
1728
Felix Fietkau248a38d2010-12-10 21:16:46 +01001729 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001730 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1731 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001732 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001733
Felix Fietkau066dae92010-11-07 14:59:39 +01001734 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001735 }
1736
1737 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001738 /*
1739 * Try aggregation if it's a unicast data frame
1740 * and the destination is HT capable.
1741 */
1742 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301743 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001744 INIT_LIST_HEAD(&bf_head);
1745 list_add_tail(&bf->list, &bf_head);
1746
Felix Fietkau61117f02010-11-11 03:18:36 +01001747 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001748 bf->bf_state.bfs_paprd = txctl->paprd;
1749
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001750 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001751 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1752 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001753
Felix Fietkau248a38d2010-12-10 21:16:46 +01001754 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301755 }
1756
1757 spin_unlock_bh(&txctl->txq->axq_lock);
1758}
1759
1760/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001761int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301762 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001763{
Felix Fietkau28d16702010-11-14 15:20:10 +01001764 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1765 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001766 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001767 struct ath_wiphy *aphy = hw->priv;
1768 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001769 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001770 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001771 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001772 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001773 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001774
Ben Greeara9927ba2010-12-06 21:13:49 -08001775 /* NOTE: sta can be NULL according to net/mac80211.h */
1776 if (sta)
1777 txctl->an = (struct ath_node *)sta->drv_priv;
1778
Felix Fietkau04caf862010-11-14 15:20:12 +01001779 if (info->control.hw_key)
1780 frmlen += info->control.hw_key->icv_len;
1781
Felix Fietkau28d16702010-11-14 15:20:10 +01001782 /*
1783 * As a temporary workaround, assign seq# here; this will likely need
1784 * to be cleaned up to work better with Beacon transmission and virtual
1785 * BSSes.
1786 */
1787 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1788 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1789 sc->tx.seq_no += 0x10;
1790 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1791 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1792 }
1793
1794 /* Add the padding after the header if this is not already done */
1795 padpos = ath9k_cmn_padpos(hdr->frame_control);
1796 padsize = padpos & 3;
1797 if (padsize && skb->len > padpos) {
1798 if (skb_headroom(skb) < padsize)
1799 return -ENOMEM;
1800
1801 skb_push(skb, padsize);
1802 memmove(skb->data, skb->data + padsize, padpos);
1803 }
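	/*
	 * Example (illustrative): a QoS data frame carries a 26-byte 802.11
	 * header, so padpos = 26 and padsize = 26 & 3 = 2.  skb_push() grows
	 * the head by two bytes and the memmove() shifts the header into
	 * them, leaving two pad bytes between header and payload so the
	 * payload starts on a 4-byte boundary.
	 */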
1804
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001805 setup_frame_info(hw, skb, frmlen);
1806
1807 /*
1808 * At this point, the vif, hw_key and sta pointers in the tx control
 1809 * info are no longer valid (overwritten by the ath_frame_info data).
1810 */
1811
1812 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001813 if (unlikely(!bf))
1814 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001815
Felix Fietkau066dae92010-11-07 14:59:39 +01001816 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001817 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001818 if (txq == sc->tx.txq_map[q] &&
1819 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001820 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001821 txq->stopped = 1;
1822 }
1823 spin_unlock_bh(&txq->axq_lock);
1824
Sujithe8324352009-01-16 21:38:42 +05301825 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001826
1827 return 0;
1828}
1829
Sujithe8324352009-01-16 21:38:42 +05301830/*****************/
1831/* TX Completion */
1832/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001833
Sujithe8324352009-01-16 21:38:42 +05301834static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001835 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001836{
Sujithe8324352009-01-16 21:38:42 +05301837 struct ieee80211_hw *hw = sc->hw;
1838 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001839 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001840 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001841 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301842
Joe Perches226afe62010-12-02 19:12:37 -08001843 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301844
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301845 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301846 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301847
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301848 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301849 /* Frame was ACKed */
1850 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1851 }
1852
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001853 padpos = ath9k_cmn_padpos(hdr->frame_control);
1854 padsize = padpos & 3;
 1855 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301856 /*
1857 * Remove MAC header padding before giving the frame back to
1858 * mac80211.
1859 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001860 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301861 skb_pull(skb, padsize);
1862 }
1863
Sujith1b04b932010-01-08 10:36:05 +05301864 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1865 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001866 ath_dbg(common, ATH_DBG_PS,
1867 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301868 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1869 PS_WAIT_FOR_CAB |
1870 PS_WAIT_FOR_PSPOLL_DATA |
1871 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001872 }
1873
Felix Fietkau7545daf2011-01-24 19:23:16 +01001874 q = skb_get_queue_mapping(skb);
1875 if (txq == sc->tx.txq_map[q]) {
1876 spin_lock_bh(&txq->axq_lock);
1877 if (WARN_ON(--txq->pending_frames < 0))
1878 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001879
Felix Fietkau7545daf2011-01-24 19:23:16 +01001880 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1881 ieee80211_wake_queue(sc->hw, q);
1882 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001883 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001884 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001885 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001886
1887 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301888}
1889
1890static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001891 struct ath_txq *txq, struct list_head *bf_q,
1892 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301893{
1894 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301895 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301896 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301897
Sujithe8324352009-01-16 21:38:42 +05301898 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301900
1901 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301902 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301903
1904 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301905 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301906 }
1907
Ben Greearc1739eb32010-10-14 12:45:29 -07001908 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001909 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001910
1911 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001912 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001913 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001914 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001915 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001916 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001917 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001918 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001919 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001920 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001921 /* At this point, skb (bf->bf_mpdu) is consumed, so make sure we don't
1922 * accidentally reference it later.
1923 */
1924 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301925
1926 /*
1927 * Return the list of ath_buf of this mpdu to free queue
1928 */
1929 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1930 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1931 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1932}
1933
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001934static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1935 struct ath_tx_status *ts, int nframes, int nbad,
1936 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301937{
Sujitha22be222009-03-30 15:28:36 +05301938 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301939 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301940 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001941 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001942 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301943 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301944
Sujith95e4acb2009-03-13 08:56:09 +05301945 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001946 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301947
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001948 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301949 WARN_ON(tx_rateindex >= hw->max_rates);
1950
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001951 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301952 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001953 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001954 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301955
Felix Fietkaub572d032010-11-14 15:20:07 +01001956 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001957
Felix Fietkaub572d032010-11-14 15:20:07 +01001958 tx_info->status.ampdu_len = nframes;
1959 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001960 }
1961
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001962 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301963 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001964 /*
 1965 * If an underrun error is seen, treat it as an excessive
 1966 * retry only if the max frame trigger level has been reached
 1967 * (2 KB for single stream, 4 KB for dual stream).
1968 * Adjust the long retry as if the frame was tried
1969 * hw->max_rate_tries times to affect how rate control updates
1970 * PER for the failed rate.
 1971 * In case of congestion on the bus, penalizing this type of
 1972 * underrun should help the hardware actually transmit new frames
1973 * successfully by eventually preferring slower rates.
1974 * This itself should also alleviate congestion on the bus.
1975 */
1976 if (ieee80211_is_data(hdr->frame_control) &&
1977 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1978 ATH9K_TX_DELIM_UNDERRUN)) &&
1979 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1980 tx_info->status.rates[tx_rateindex].count =
1981 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301982 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301983
Felix Fietkau545750d2009-11-23 22:21:01 +01001984 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301985 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001986 tx_info->status.rates[i].idx = -1;
1987 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301988
Felix Fietkau78c46532010-06-25 01:26:16 +02001989 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301990}
1991
Sujithc4288392008-11-18 09:09:30 +05301992static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993{
Sujithcbe61d82009-02-09 13:27:12 +05301994 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001995 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001996 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1997 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301998 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001999 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302000 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002001 int status;
2002
Joe Perches226afe62010-12-02 19:12:37 -08002003 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2004 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2005 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002006
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007 for (;;) {
2008 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002009 if (list_empty(&txq->axq_q)) {
2010 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002011 if (sc->sc_flags & SC_OP_TXAGGR)
2012 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002013 spin_unlock_bh(&txq->axq_lock);
2014 break;
2015 }
2016 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2017
2018 /*
 2019 * There is a race condition where a BH can be scheduled
 2020 * after sw writes TxE and before the hw re-loads the last
 2021 * descriptor to fetch the newly chained one.
2022 * Software must keep the last DONE descriptor as a
2023 * holding descriptor - software does so by marking
2024 * it with the STALE flag.
2025 */
2026 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302027 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002028 bf_held = bf;
2029 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302030 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002031 break;
2032 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002033 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302034 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002035 }
2036 }
2037
2038 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302039 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040
Felix Fietkau29bffa92010-03-29 20:14:23 -07002041 memset(&ts, 0, sizeof(ts));
2042 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 if (status == -EINPROGRESS) {
2044 spin_unlock_bh(&txq->axq_lock);
2045 break;
2046 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002047 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048
2049 /*
2050 * Remove ath_buf's of the same transmit unit from txq,
 2051 * but leave the last descriptor in place as the holding
 2052 * descriptor for the hw.
2053 */
Sujitha119cc42009-03-30 15:28:38 +05302054 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002055 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 if (!list_is_singular(&lastbf->list))
2057 list_cut_position(&bf_head,
2058 &txq->axq_q, lastbf->list.prev);
2059
2060 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002061 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002062 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002063 if (bf_held)
2064 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002065
2066 if (bf_is_ampdu_not_probing(bf))
2067 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002068 spin_unlock_bh(&txq->axq_lock);
2069
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002070 if (bf_held)
2071 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072
Sujithcd3d39a2008-08-11 14:03:34 +05302073 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 /*
2075 * This frame is sent out as a single frame.
2076 * Use hardware retry status for this frame.
2077 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002078 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302079 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002080 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081 }
Johannes Berge6a98542008-10-21 12:40:02 +02002082
Sujithcd3d39a2008-08-11 14:03:34 +05302083 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002084 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2085 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002087 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002090
Sujith672840a2008-08-11 14:05:08 +05302091 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 ath_txq_schedule(sc, txq);
2093 spin_unlock_bh(&txq->axq_lock);
2094 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095}
2096
Sujith305fe472009-07-23 15:32:29 +05302097static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002098{
2099 struct ath_softc *sc = container_of(work, struct ath_softc,
2100 tx_complete_work.work);
2101 struct ath_txq *txq;
2102 int i;
2103 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002104#ifdef CONFIG_ATH9K_DEBUGFS
2105 sc->tx_complete_poll_work_seen++;
2106#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002107
2108 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2109 if (ATH_TXQ_SETUP(sc, i)) {
2110 txq = &sc->tx.txq[i];
2111 spin_lock_bh(&txq->axq_lock);
2112 if (txq->axq_depth) {
2113 if (txq->axq_tx_inprogress) {
2114 needreset = true;
2115 spin_unlock_bh(&txq->axq_lock);
2116 break;
2117 } else {
2118 txq->axq_tx_inprogress = true;
2119 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002120 } else {
2121 /* If the queue has pending buffers, then it
2122 * should be doing tx work (and have axq_depth).
 2123 * We shouldn't get to this state, but in
 2124 * practice we do.
2125 */
2126 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2127 (txq->pending_frames > 0 ||
2128 !list_empty(&txq->axq_acq) ||
2129 txq->stopped)) {
2130 ath_err(ath9k_hw_common(sc->sc_ah),
2131 "txq: %p axq_qnum: %u,"
2132 " mac80211_qnum: %i"
2133 " axq_link: %p"
2134 " pending frames: %i"
2135 " axq_acq empty: %i"
2136 " stopped: %i"
2137 " axq_depth: 0 Attempting to"
2138 " restart tx logic.\n",
2139 txq, txq->axq_qnum,
2140 txq->mac80211_qnum,
2141 txq->axq_link,
2142 txq->pending_frames,
2143 list_empty(&txq->axq_acq),
2144 txq->stopped);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002145 ath_txq_schedule(sc, txq);
2146 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002147 }
2148 spin_unlock_bh(&txq->axq_lock);
2149 }
2150
2151 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002152 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2153 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302154 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002155 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302156 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002157 }
2158
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002159 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002160 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2161}
2162
2163
Sujithe8324352009-01-16 21:38:42 +05302164
2165void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166{
Sujithe8324352009-01-16 21:38:42 +05302167 int i;
2168 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169
Sujithe8324352009-01-16 21:38:42 +05302170 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171
2172 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302173 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2174 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002175 }
2176}
2177
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002178void ath_tx_edma_tasklet(struct ath_softc *sc)
2179{
2180 struct ath_tx_status txs;
2181 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2182 struct ath_hw *ah = sc->sc_ah;
2183 struct ath_txq *txq;
2184 struct ath_buf *bf, *lastbf;
2185 struct list_head bf_head;
2186 int status;
2187 int txok;
2188
2189 for (;;) {
2190 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2191 if (status == -EINPROGRESS)
2192 break;
2193 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002194 ath_dbg(common, ATH_DBG_XMIT,
2195 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002196 break;
2197 }
2198
2199 /* Skip beacon completions */
2200 if (txs.qid == sc->beacon.beaconq)
2201 continue;
2202
2203 txq = &sc->tx.txq[txs.qid];
2204
2205 spin_lock_bh(&txq->axq_lock);
2206 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2207 spin_unlock_bh(&txq->axq_lock);
2208 return;
2209 }
2210
2211 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2212 struct ath_buf, list);
2213 lastbf = bf->bf_lastbf;
2214
2215 INIT_LIST_HEAD(&bf_head);
2216 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2217 &lastbf->list);
2218 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2219 txq->axq_depth--;
2220 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002221 if (bf_is_ampdu_not_probing(bf))
2222 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002223 spin_unlock_bh(&txq->axq_lock);
2224
2225 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2226
2227 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002228 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2229 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002230 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002231 }
2232
2233 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002234 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2235 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236 else
2237 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2238 &txs, txok, 0);
2239
2240 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002241
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242 if (!list_empty(&txq->txq_fifo_pending)) {
2243 INIT_LIST_HEAD(&bf_head);
2244 bf = list_first_entry(&txq->txq_fifo_pending,
2245 struct ath_buf, list);
2246 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2247 &bf->bf_lastbf->list);
2248 ath_tx_txqaddbuf(sc, txq, &bf_head);
2249 } else if (sc->sc_flags & SC_OP_TXAGGR)
2250 ath_txq_schedule(sc, txq);
2251 spin_unlock_bh(&txq->axq_lock);
2252 }
2253}
2254
Sujithe8324352009-01-16 21:38:42 +05302255/*****************/
2256/* Init, Cleanup */
2257/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002258
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002259static int ath_txstatus_setup(struct ath_softc *sc, int size)
2260{
2261 struct ath_descdma *dd = &sc->txsdma;
2262 u8 txs_len = sc->sc_ah->caps.txs_len;
2263
2264 dd->dd_desc_len = size * txs_len;
2265 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2266 &dd->dd_desc_paddr, GFP_KERNEL);
2267 if (!dd->dd_desc)
2268 return -ENOMEM;
2269
2270 return 0;
2271}
2272
2273static int ath_tx_edma_init(struct ath_softc *sc)
2274{
2275 int err;
2276
2277 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2278 if (!err)
2279 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2280 sc->txsdma.dd_desc_paddr,
2281 ATH_TXSTATUS_RING_SIZE);
2282
2283 return err;
2284}
2285
2286static void ath_tx_edma_cleanup(struct ath_softc *sc)
2287{
2288 struct ath_descdma *dd = &sc->txsdma;
2289
2290 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2291 dd->dd_desc_paddr);
2292}
2293
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002294int ath_tx_init(struct ath_softc *sc, int nbufs)
2295{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002296 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002297 int error = 0;
2298
Sujith797fe5cb2009-03-30 15:28:45 +05302299 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300
Sujith797fe5cb2009-03-30 15:28:45 +05302301 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002302 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302303 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002304 ath_err(common,
2305 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302306 goto err;
2307 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002308
Sujith797fe5cb2009-03-30 15:28:45 +05302309 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002310 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302311 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002312 ath_err(common,
2313 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302314 goto err;
2315 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002317 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2318
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002319 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2320 error = ath_tx_edma_init(sc);
2321 if (error)
2322 goto err;
2323 }
2324
Sujith797fe5cb2009-03-30 15:28:45 +05302325err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326 if (error != 0)
2327 ath_tx_cleanup(sc);
2328
2329 return error;
2330}
2331
Sujith797fe5cb2009-03-30 15:28:45 +05302332void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002333{
Sujithb77f4832008-12-07 21:44:03 +05302334 if (sc->beacon.bdma.dd_desc_len != 0)
2335 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336
Sujithb77f4832008-12-07 21:44:03 +05302337 if (sc->tx.txdma.dd_desc_len != 0)
2338 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002339
2340 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2341 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342}
2343
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2345{
Sujithc5170162008-10-29 10:13:59 +05302346 struct ath_atx_tid *tid;
2347 struct ath_atx_ac *ac;
2348 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
Sujith8ee5afb2008-12-07 21:43:36 +05302350 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302351 tidno < WME_NUM_TID;
2352 tidno++, tid++) {
2353 tid->an = an;
2354 tid->tidno = tidno;
2355 tid->seq_start = tid->seq_next = 0;
2356 tid->baw_size = WME_MAX_BA;
2357 tid->baw_head = tid->baw_tail = 0;
2358 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302359 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302360 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302361 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302362 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302363 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302364 tid->state &= ~AGGR_ADDBA_COMPLETE;
2365 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302366 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367
Sujith8ee5afb2008-12-07 21:43:36 +05302368 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302369 acno < WME_NUM_AC; acno++, ac++) {
2370 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002371 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302372 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373 }
2374}
2375
Sujithb5aa9bf2008-10-29 10:13:31 +05302376void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377{
Felix Fietkau2b409942010-07-07 19:42:08 +02002378 struct ath_atx_ac *ac;
2379 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002381 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302382
Felix Fietkau2b409942010-07-07 19:42:08 +02002383 for (tidno = 0, tid = &an->tid[tidno];
2384 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385
Felix Fietkau2b409942010-07-07 19:42:08 +02002386 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002387 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388
Felix Fietkau2b409942010-07-07 19:42:08 +02002389 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390
Felix Fietkau2b409942010-07-07 19:42:08 +02002391 if (tid->sched) {
2392 list_del(&tid->list);
2393 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002395
2396 if (ac->sched) {
2397 list_del(&ac->list);
2398 tid->ac->sched = false;
2399 }
2400
2401 ath_tid_drain(sc, txq, tid);
2402 tid->state &= ~AGGR_ADDBA_COMPLETE;
2403 tid->state &= ~AGGR_CLEANUP;
2404
2405 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002406 }
2407}