Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujithcee075a2009-03-13 09:07:23 +05302 * Copyright (c) 2008-2009 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Sujith394cf0a2009-02-09 13:26:54 +053017#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040018#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070019
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070022#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
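/*
 * The _ns argument to the two SYMBOL_TIME macros is a symbol count, not
 * nanoseconds: an HT OFDM symbol lasts 4 us with the regular guard
 * interval and 3.6 us with the short GI. For example SYMBOL_TIME(3) =
 * 3 << 2 = 12 us, while SYMBOL_TIME_HALFGI(3) = (3 * 18 + 4) / 5 = 11 us,
 * i.e. 3 * 3.6 = 10.8 us rounded up to a whole microsecond by the "+ 4"
 * before the integer division.
 */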
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070034
Felix Fietkauc6663872010-04-19 19:57:33 +020035static u16 bits_per_symbol[][2] = {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070036 /* 20MHz 40MHz */
37 { 26, 54 }, /* 0: BPSK */
38 { 52, 108 }, /* 1: QPSK 1/2 */
39 { 78, 162 }, /* 2: QPSK 3/4 */
40 { 104, 216 }, /* 3: 16-QAM 1/2 */
41 { 156, 324 }, /* 4: 16-QAM 3/4 */
42 { 208, 432 }, /* 5: 64-QAM 2/3 */
43 { 234, 486 }, /* 6: 64-QAM 3/4 */
44 { 260, 540 }, /* 7: 64-QAM 5/6 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070045};
46
47#define IS_HT_RATE(_rate) ((_rate) & 0x80)
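/*
 * HT rate codes carry the MCS index in the low bits with bit 7 set, which
 * is what IS_HT_RATE() checks. For MCS 0-31 the spatial stream count is
 * implied by the MCS index itself (MCS 0-7 use one stream, 8-15 two, and
 * so on), so HT_RC_2_STREAMS() derives it from bits 3-6 and
 * bits_per_symbol[] only needs the single-stream entries, indexed further
 * down with rix % 8 and scaled by the stream count.
 */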
48
Felix Fietkau82b873a2010-11-11 03:18:37 +010049static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
50 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +010051 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053052static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070053 struct ath_txq *txq, struct list_head *bf_q,
54 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053055static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
56 struct list_head *head);
Felix Fietkau269c44b2010-11-14 15:20:06 +010057static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010058static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020061static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
62 int seqno);
Sujithe8324352009-01-16 21:38:42 +053063
Felix Fietkau545750d2009-11-23 22:21:01 +010064enum {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020065 MCS_HT20,
66 MCS_HT20_SGI,
Felix Fietkau545750d2009-11-23 22:21:01 +010067 MCS_HT40,
68 MCS_HT40_SGI,
69};
70
Felix Fietkau0e668cd2010-04-19 19:57:32 +020071static int ath_max_4ms_framelen[4][32] = {
72 [MCS_HT20] = {
73 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
74 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
75 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
76 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
77 },
78 [MCS_HT20_SGI] = {
79 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
80 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
81 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
82 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010083 },
84 [MCS_HT40] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020085 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
86 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
87 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
88 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010089 },
90 [MCS_HT40_SGI] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020091 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
92 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
93 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
94 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010095 }
96};
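/*
 * ath_max_4ms_framelen[mode][mcs] is the largest frame length (in bytes)
 * that still fits into roughly 4 ms of airtime at that MCS, capped at
 * 65532 to stay within the 16-bit aggregate length the hardware accepts.
 * As a sanity check, MCS 0 in HT20 runs at 6.5 Mbit/s and
 * 6.5e6 * 0.004 / 8 is about 3250 bytes, close to the 3212-byte entry
 * above. ath_lookup_rate() below takes the minimum of these values over
 * the frame's rate series to bound the size of an aggregate.
 */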
97
Sujithe8324352009-01-16 21:38:42 +053098/*********************/
99/* Aggregation logic */
100/*********************/
101
Sujithe8324352009-01-16 21:38:42 +0530102static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
103{
104 struct ath_atx_ac *ac = tid->ac;
105
106 if (tid->paused)
107 return;
108
109 if (tid->sched)
110 return;
111
112 tid->sched = true;
113 list_add_tail(&tid->list, &ac->tid_q);
114
115 if (ac->sched)
116 return;
117
118 ac->sched = true;
119 list_add_tail(&ac->list, &txq->axq_acq);
120}
121
Sujithe8324352009-01-16 21:38:42 +0530122static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
123{
Felix Fietkau066dae92010-11-07 14:59:39 +0100124 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530125
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200126 WARN_ON(!tid->paused);
127
Sujithe8324352009-01-16 21:38:42 +0530128 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200129 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530130
131 if (list_empty(&tid->buf_q))
132 goto unlock;
133
134 ath_tx_queue_tid(txq, tid);
135 ath_txq_schedule(sc, txq);
136unlock:
137 spin_unlock_bh(&txq->axq_lock);
138}
139
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100140static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100141{
142 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100143 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
144 sizeof(tx_info->rate_driver_data));
145 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100146}
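/*
 * Per-frame driver state (retry count, sequence number, key index and
 * frame length) lives in the rate_driver_data scratch space of the skb's
 * ieee80211_tx_info; the BUILD_BUG_ON above makes sure at compile time
 * that struct ath_frame_info still fits in that area.
 */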
147
Sujithe8324352009-01-16 21:38:42 +0530148static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
149{
Felix Fietkau066dae92010-11-07 14:59:39 +0100150 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530151 struct ath_buf *bf;
152 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200153 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100154 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200155
Sujithe8324352009-01-16 21:38:42 +0530156 INIT_LIST_HEAD(&bf_head);
157
Felix Fietkau90fa5392010-09-20 13:45:38 +0200158 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530159 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530160
161 while (!list_empty(&tid->buf_q)) {
162 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530163 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200164
Felix Fietkaue1566d12010-11-20 03:08:46 +0100165 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100166 fi = get_frame_info(bf->bf_mpdu);
167 if (fi->retries) {
168 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
170 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200172 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100173 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530174 }
175
176 spin_unlock_bh(&txq->axq_lock);
177}
178
179static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
180 int seqno)
181{
182 int index, cindex;
183
184 index = ATH_BA_INDEX(tid->seq_start, seqno);
185 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
186
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200187 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530188
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200189 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530190 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
191 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
192 }
193}
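/*
 * The block-ack window is tracked with a circular bitmap (tid->tx_buf) of
 * ATH_TID_MAX_BUFS entries starting at the sequence number tid->seq_start.
 * ath_tx_addto_baw() sets the bit for a newly queued subframe and
 * ath_tx_update_baw() clears it when the subframe completes; once the
 * oldest bits are clear, seq_start and baw_head slide forward so that the
 * window opens up for new subframes.
 */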
194
195static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100196 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530197{
198 int index, cindex;
199
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100200 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530201 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200202 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530203
204 if (index >= ((tid->baw_tail - tid->baw_head) &
205 (ATH_TID_MAX_BUFS - 1))) {
206 tid->baw_tail = cindex;
207 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
208 }
209}
210
211/*
212 * TODO: For frame(s) that are in the retry state, we will reuse the
213 * sequence number(s) without setting the retry bit. The
214 * alternative is to give up on these and BAR the receiver's window
215 * forward.
216 */
217static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
218 struct ath_atx_tid *tid)
219
220{
221 struct ath_buf *bf;
222 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700223 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100224 struct ath_frame_info *fi;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700225
226 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530227 INIT_LIST_HEAD(&bf_head);
228
229 for (;;) {
230 if (list_empty(&tid->buf_q))
231 break;
Sujithe8324352009-01-16 21:38:42 +0530232
Sujithd43f30152009-01-16 21:38:53 +0530233 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
234 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530235
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100236 fi = get_frame_info(bf->bf_mpdu);
237 if (fi->retries)
238 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530239
240 spin_unlock(&txq->axq_lock);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530242 spin_lock(&txq->axq_lock);
243 }
244
245 tid->seq_next = tid->seq_start;
246 tid->baw_tail = tid->baw_head;
247}
248
Sujithfec247c2009-07-27 12:08:16 +0530249static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100250 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +0530251{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100252 struct ath_frame_info *fi = get_frame_info(skb);
Sujithe8324352009-01-16 21:38:42 +0530253 struct ieee80211_hdr *hdr;
254
Sujithfec247c2009-07-27 12:08:16 +0530255 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100256 if (fi->retries++ > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100257 return;
Sujithe8324352009-01-16 21:38:42 +0530258
Sujithe8324352009-01-16 21:38:42 +0530259 hdr = (struct ieee80211_hdr *)skb->data;
260 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
261}
262
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200263static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
264{
265 struct ath_buf *bf = NULL;
266
267 spin_lock_bh(&sc->tx.txbuflock);
268
269 if (unlikely(list_empty(&sc->tx.txbuf))) {
270 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL;
272 }
273
274 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
275 list_del(&bf->list);
276
277 spin_unlock_bh(&sc->tx.txbuflock);
278
279 return bf;
280}
281
282static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
283{
284 spin_lock_bh(&sc->tx.txbuflock);
285 list_add_tail(&bf->list, &sc->tx.txbuf);
286 spin_unlock_bh(&sc->tx.txbuflock);
287}
288
Sujithd43f30152009-01-16 21:38:53 +0530289static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
290{
291 struct ath_buf *tbf;
292
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200293 tbf = ath_tx_get_buffer(sc);
294 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530295 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530296
297 ATH_TXBUF_RESET(tbf);
298
299 tbf->bf_mpdu = bf->bf_mpdu;
300 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530302 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530303
304 return tbf;
305}
306
Felix Fietkaub572d032010-11-14 15:20:07 +0100307static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
308 struct ath_tx_status *ts, int txok,
309 int *nframes, int *nbad)
310{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100311 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100312 u16 seq_st = 0;
313 u32 ba[WME_BA_BMP_SIZE >> 5];
314 int ba_index;
315 int isaggr = 0;
316
317 *nbad = 0;
318 *nframes = 0;
319
Felix Fietkaub572d032010-11-14 15:20:07 +0100320 isaggr = bf_isaggr(bf);
321 if (isaggr) {
322 seq_st = ts->ts_seqnum;
323 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
324 }
325
326 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100327 fi = get_frame_info(bf->bf_mpdu);
328 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100329
330 (*nframes)++;
331 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
332 (*nbad)++;
333
334 bf = bf->bf_next;
335 }
336}
337
338
Sujithd43f30152009-01-16 21:38:53 +0530339static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
340 struct ath_buf *bf, struct list_head *bf_q,
Felix Fietkauc5992612010-11-14 15:20:09 +0100341 struct ath_tx_status *ts, int txok, bool retry)
Sujithe8324352009-01-16 21:38:42 +0530342{
343 struct ath_node *an = NULL;
344 struct sk_buff *skb;
Sujith1286ec62009-01-27 13:30:37 +0530345 struct ieee80211_sta *sta;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100346 struct ieee80211_hw *hw = sc->hw;
Sujith1286ec62009-01-27 13:30:37 +0530347 struct ieee80211_hdr *hdr;
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800348 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +0530349 struct ath_atx_tid *tid = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530350 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +0530351 struct list_head bf_head, bf_pending;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530352 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
Sujithe8324352009-01-16 21:38:42 +0530353 u32 ba[WME_BA_BMP_SIZE >> 5];
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530354 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
355 bool rc_update = true;
Felix Fietkau78c46532010-06-25 01:26:16 +0200356 struct ieee80211_tx_rate rates[4];
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100357 struct ath_frame_info *fi;
Björn Smedmanebd02282010-10-10 22:44:39 +0200358 int nframes;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100359 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +0530360
Sujitha22be222009-03-30 15:28:36 +0530361 skb = bf->bf_mpdu;
Sujith1286ec62009-01-27 13:30:37 +0530362 hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +0530363
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800364 tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800365
Felix Fietkau78c46532010-06-25 01:26:16 +0200366 memcpy(rates, tx_info->control.rates, sizeof(rates));
367
Sujith1286ec62009-01-27 13:30:37 +0530368 rcu_read_lock();
369
Ben Greear686b9cb2010-09-23 09:44:36 -0700370 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
Sujith1286ec62009-01-27 13:30:37 +0530371 if (!sta) {
372 rcu_read_unlock();
Felix Fietkau73e19462010-07-07 19:42:09 +0200373
Felix Fietkau31e79a52010-07-12 23:16:34 +0200374 INIT_LIST_HEAD(&bf_head);
375 while (bf) {
376 bf_next = bf->bf_next;
377
378 bf->bf_state.bf_type |= BUF_XRETRY;
379 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
380 !bf->bf_stale || bf_next != NULL)
381 list_move_tail(&bf->list, &bf_head);
382
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100383 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
Felix Fietkau31e79a52010-07-12 23:16:34 +0200384 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
385 0, 0);
386
387 bf = bf_next;
388 }
Sujith1286ec62009-01-27 13:30:37 +0530389 return;
Sujithe8324352009-01-16 21:38:42 +0530390 }
391
Sujith1286ec62009-01-27 13:30:37 +0530392 an = (struct ath_node *)sta->drv_priv;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100393 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
394 tid = ATH_AN_2_TID(an, tidno);
Sujith1286ec62009-01-27 13:30:37 +0530395
Felix Fietkaub11b1602010-07-11 12:48:44 +0200396 /*
397 * The hardware occasionally sends a tx status for the wrong TID.
398 * In this case, the BA status cannot be considered valid and all
399 * subframes need to be retransmitted
400 */
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100401 if (tidno != ts->tid)
Felix Fietkaub11b1602010-07-11 12:48:44 +0200402 txok = false;
403
Sujithe8324352009-01-16 21:38:42 +0530404 isaggr = bf_isaggr(bf);
Sujithd43f30152009-01-16 21:38:53 +0530405 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530406
Sujithd43f30152009-01-16 21:38:53 +0530407 if (isaggr && txok) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700408 if (ts->ts_flags & ATH9K_TX_BA) {
409 seq_st = ts->ts_seqnum;
410 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530411 } else {
Sujithd43f30152009-01-16 21:38:53 +0530412 /*
413 * AR5416 can become deaf/mute when BA
414 * issue happens. Chip needs to be reset.
415 * But the AP code may have synchronization issues
416 * when performing an internal reset in this routine.
417 * Only enable reset in STA mode for now.
418 */
Sujith2660b812009-02-09 13:27:26 +0530419 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
Sujithd43f30152009-01-16 21:38:53 +0530420 needreset = 1;
Sujithe8324352009-01-16 21:38:42 +0530421 }
422 }
423
424 INIT_LIST_HEAD(&bf_pending);
425 INIT_LIST_HEAD(&bf_head);
426
Felix Fietkaub572d032010-11-14 15:20:07 +0100427 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
Sujithe8324352009-01-16 21:38:42 +0530428 while (bf) {
Felix Fietkauf0b82202011-01-15 14:30:15 +0100429 txfail = txpending = sendbar = 0;
Sujithe8324352009-01-16 21:38:42 +0530430 bf_next = bf->bf_next;
431
Felix Fietkau78c46532010-06-25 01:26:16 +0200432 skb = bf->bf_mpdu;
433 tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100434 fi = get_frame_info(skb);
Felix Fietkau78c46532010-06-25 01:26:16 +0200435
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100436 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
Sujithe8324352009-01-16 21:38:42 +0530437 /* transmit completion, subframe is
438 * acked by block ack */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530439 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530440 } else if (!isaggr && txok) {
441 /* transmit completion */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530442 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530443 } else {
Felix Fietkauc5992612010-11-14 15:20:09 +0100444 if (!(tid->state & AGGR_CLEANUP) && retry) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100445 if (fi->retries < ATH_MAX_SW_RETRIES) {
446 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530447 txpending = 1;
448 } else {
449 bf->bf_state.bf_type |= BUF_XRETRY;
450 txfail = 1;
451 sendbar = 1;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530452 txfail_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530453 }
454 } else {
455 /*
456 * cleanup in progress, just fail
457 * the un-acked sub-frames
458 */
459 txfail = 1;
460 }
461 }
462
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400463 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
464 bf_next == NULL) {
Vasanthakumar Thiagarajancbfe89c2009-06-24 18:58:47 +0530465 /*
466 * Make sure the last desc is reclaimed if it is
467 * not a holding desc.
468 */
469 if (!bf_last->bf_stale)
470 list_move_tail(&bf->list, &bf_head);
471 else
472 INIT_LIST_HEAD(&bf_head);
Sujithe8324352009-01-16 21:38:42 +0530473 } else {
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700474 BUG_ON(list_empty(bf_q));
Sujithd43f30152009-01-16 21:38:53 +0530475 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530476 }
477
Felix Fietkau90fa5392010-09-20 13:45:38 +0200478 if (!txpending || (tid->state & AGGR_CLEANUP)) {
Sujithe8324352009-01-16 21:38:42 +0530479 /*
480 * complete the acked-ones/xretried ones; update
481 * block-ack window
482 */
483 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100484 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530485 spin_unlock_bh(&txq->axq_lock);
486
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530487 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
Felix Fietkau78c46532010-06-25 01:26:16 +0200488 memcpy(tx_info->control.rates, rates, sizeof(rates));
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530490 rc_update = false;
491 } else {
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530493 }
494
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700495 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
496 !txfail, sendbar);
Sujithe8324352009-01-16 21:38:42 +0530497 } else {
Sujithd43f30152009-01-16 21:38:53 +0530498 /* retry the un-acked ones */
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
500 if (bf->bf_next == NULL && bf_last->bf_stale) {
501 struct ath_buf *tbf;
Sujithe8324352009-01-16 21:38:42 +0530502
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400503 tbf = ath_clone_txbuf(sc, bf_last);
504 /*
505 * Update tx baw and complete the
506 * frame with failed status if we
507 * run out of tx buf.
508 */
509 if (!tbf) {
510 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100511 ath_tx_update_baw(sc, tid, fi->seqno);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400512 spin_unlock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400513
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400514 bf->bf_state.bf_type |=
515 BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100516 ath_tx_rc_status(sc, bf, ts, nframes,
Felix Fietkaub572d032010-11-14 15:20:07 +0100517 nbad, 0, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400518 ath_tx_complete_buf(sc, bf, txq,
519 &bf_head,
520 ts, 0, 0);
521 break;
522 }
523
524 ath9k_hw_cleartxdesc(sc->sc_ah,
525 tbf->bf_desc);
526 list_add_tail(&tbf->list, &bf_head);
527 } else {
528 /*
529 * Clear descriptor status words for
530 * software retry
531 */
532 ath9k_hw_cleartxdesc(sc->sc_ah,
533 bf->bf_desc);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400534 }
Sujithe8324352009-01-16 21:38:42 +0530535 }
536
537 /*
538 * Put this buffer on the temporary pending
539 * queue to retain ordering
540 */
541 list_splice_tail_init(&bf_head, &bf_pending);
542 }
543
544 bf = bf_next;
545 }
546
Felix Fietkau4cee7862010-07-23 03:53:16 +0200547 /* prepend un-acked frames to the beginning of the pending frame queue */
548 if (!list_empty(&bf_pending)) {
549 spin_lock_bh(&txq->axq_lock);
550 list_splice(&bf_pending, &tid->buf_q);
551 ath_tx_queue_tid(txq, tid);
552 spin_unlock_bh(&txq->axq_lock);
553 }
554
Sujithe8324352009-01-16 21:38:42 +0530555 if (tid->state & AGGR_CLEANUP) {
Felix Fietkau90fa5392010-09-20 13:45:38 +0200556 ath_tx_flush_tid(sc, tid);
557
Sujithe8324352009-01-16 21:38:42 +0530558 if (tid->baw_head == tid->baw_tail) {
559 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530560 tid->state &= ~AGGR_CLEANUP;
Sujithd43f30152009-01-16 21:38:53 +0530561 }
Sujithe8324352009-01-16 21:38:42 +0530562 }
563
Sujith1286ec62009-01-27 13:30:37 +0530564 rcu_read_unlock();
565
Sujithe8324352009-01-16 21:38:42 +0530566 if (needreset)
567 ath_reset(sc, false);
Sujithe8324352009-01-16 21:38:42 +0530568}
569
570static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
571 struct ath_atx_tid *tid)
572{
Sujithe8324352009-01-16 21:38:42 +0530573 struct sk_buff *skb;
574 struct ieee80211_tx_info *tx_info;
575 struct ieee80211_tx_rate *rates;
Sujithd43f30152009-01-16 21:38:53 +0530576 u32 max_4ms_framelen, frmlen;
Sujith4ef70842009-07-23 15:32:41 +0530577 u16 aggr_limit, legacy = 0;
Sujithe8324352009-01-16 21:38:42 +0530578 int i;
579
Sujitha22be222009-03-30 15:28:36 +0530580 skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +0530581 tx_info = IEEE80211_SKB_CB(skb);
582 rates = tx_info->control.rates;
Sujithe8324352009-01-16 21:38:42 +0530583
584 /*
585 * Find the lowest frame length among the rate series that will have a
586 * 4ms transmit duration.
587 * TODO - TXOP limit needs to be considered.
588 */
589 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
590
591 for (i = 0; i < 4; i++) {
592 if (rates[i].count) {
Felix Fietkau545750d2009-11-23 22:21:01 +0100593 int modeidx;
594 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
Sujithe8324352009-01-16 21:38:42 +0530595 legacy = 1;
596 break;
597 }
598
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200599 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau545750d2009-11-23 22:21:01 +0100600 modeidx = MCS_HT40;
601 else
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200602 modeidx = MCS_HT20;
603
604 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
605 modeidx++;
Felix Fietkau545750d2009-11-23 22:21:01 +0100606
607 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
Sujithd43f30152009-01-16 21:38:53 +0530608 max_4ms_framelen = min(max_4ms_framelen, frmlen);
Sujithe8324352009-01-16 21:38:42 +0530609 }
610 }
611
612 /*
613 * Limit the aggregate size by the minimum rate if the selected rate is
614 * not a probe rate; if the selected rate is a probe rate,
615 * avoid aggregating this packet.
616 */
617 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
618 return 0;
619
Vasanthakumar Thiagarajan17739122009-08-26 21:08:50 +0530620 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
621 aggr_limit = min((max_4ms_framelen * 3) / 8,
622 (u32)ATH_AMPDU_LIMIT_MAX);
623 else
624 aggr_limit = min(max_4ms_framelen,
625 (u32)ATH_AMPDU_LIMIT_MAX);
Sujithe8324352009-01-16 21:38:42 +0530626
627 /*
628 * The h/w can accept aggregates up to 16 bit lengths (65535).
629 * The IE, however, can hold up to 65536, which shows up here
630 * as zero. Ignore 65536 since we are constrained by hw.
631 */
Sujith4ef70842009-07-23 15:32:41 +0530632 if (tid->an->maxampdu)
633 aggr_limit = min(aggr_limit, tid->an->maxampdu);
Sujithe8324352009-01-16 21:38:42 +0530634
635 return aggr_limit;
636}
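/*
 * Note on the limits computed above: while Bluetooth coexistence priority
 * traffic is detected the 4 ms based limit is scaled down to 3/8 (roughly
 * 1.5 ms of airtime per aggregate), presumably to hand the medium back to
 * BT more often, and the result is always clamped to ATH_AMPDU_LIMIT_MAX
 * and, when advertised, to the peer's maximum A-MPDU size
 * (tid->an->maxampdu).
 */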
637
638/*
Sujithd43f30152009-01-16 21:38:53 +0530639 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530640 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530641 */
642static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
643 struct ath_buf *bf, u16 frmlen)
644{
Sujithe8324352009-01-16 21:38:42 +0530645 struct sk_buff *skb = bf->bf_mpdu;
646 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530647 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530648 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100649 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200650 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100651 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530652
653 /* Select standard number of delimiters based on frame length alone */
654 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
655
656 /*
657 * If encryption is enabled, the hardware requires some more padding
658 * between subframes.
659 * TODO - this could be improved to be dependent on the rate.
660 * The hardware can keep up at lower rates, but not at higher rates.
661 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100662 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
Sujithe8324352009-01-16 21:38:42 +0530663 ndelim += ATH_AGGR_ENCRYPTDELIM;
664
665 /*
666 * Convert desired mpdu density from microseconds to bytes based
667 * on highest rate in rate series (i.e. first rate) to determine
668 * required minimum length for subframe. Take into account
669 * whether high rate is 20 or 40 MHz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530670 *
Sujithe8324352009-01-16 21:38:42 +0530671 * If there is no mpdu density restriction, no further calculation
672 * is needed.
673 */
Sujith4ef70842009-07-23 15:32:41 +0530674
675 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530676 return ndelim;
677
678 rix = tx_info->control.rates[0].idx;
679 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530680 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
681 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
682
683 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530684 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530685 else
Sujith4ef70842009-07-23 15:32:41 +0530686 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530687
688 if (nsymbols == 0)
689 nsymbols = 1;
690
Felix Fietkauc6663872010-04-19 19:57:33 +0200691 streams = HT_RC_2_STREAMS(rix);
692 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530693 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
694
Sujithe8324352009-01-16 21:38:42 +0530695 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530696 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
697 ndelim = max(mindelim, ndelim);
698 }
699
700 return ndelim;
701}
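/*
 * Worked example of the density calculation above, assuming
 * ATH_AGGR_DELIM_SZ is the 4-byte MPDU delimiter: with an mpdudensity of
 * 8 us and a single-stream MCS 7 first rate in HT20 with the long GI,
 * nsymbols = 8 >> 2 = 2 and nsymbits = 260, so minlen = 2 * 260 / 8 = 65
 * bytes. A 20-byte subframe would then get
 * mindelim = (65 - 20) / 4 = 11, and ndelim is raised to that value if
 * the length-based default from ATH_AGGR_GET_NDELIM() was smaller.
 */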
702
703static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530704 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530705 struct ath_atx_tid *tid,
Felix Fietkau269c44b2010-11-14 15:20:06 +0100706 struct list_head *bf_q,
707 int *aggr_len)
Sujithe8324352009-01-16 21:38:42 +0530708{
709#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Sujithd43f30152009-01-16 21:38:53 +0530710 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
711 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530712 u16 aggr_limit = 0, al = 0, bpad = 0,
713 al_delta, h_baw = tid->baw_size / 2;
714 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Felix Fietkau0299a502010-10-21 02:47:24 +0200715 struct ieee80211_tx_info *tx_info;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100716 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530717
718 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
719
720 do {
721 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100722 fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530723
Sujithd43f30152009-01-16 21:38:53 +0530724 /* do not step over block-ack window */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100725 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
Sujithe8324352009-01-16 21:38:42 +0530726 status = ATH_AGGR_BAW_CLOSED;
727 break;
728 }
729
730 if (!rl) {
731 aggr_limit = ath_lookup_rate(sc, bf, tid);
732 rl = 1;
733 }
734
Sujithd43f30152009-01-16 21:38:53 +0530735 /* do not exceed aggregation limit */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100736 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
Sujithe8324352009-01-16 21:38:42 +0530737
Sujithd43f30152009-01-16 21:38:53 +0530738 if (nframes &&
739 (aggr_limit < (al + bpad + al_delta + prev_al))) {
Sujithe8324352009-01-16 21:38:42 +0530740 status = ATH_AGGR_LIMITED;
741 break;
742 }
743
Felix Fietkau0299a502010-10-21 02:47:24 +0200744 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
745 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
746 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
747 break;
748
Sujithd43f30152009-01-16 21:38:53 +0530749 /* do not exceed subframe limit */
750 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530751 status = ATH_AGGR_LIMITED;
752 break;
753 }
Sujithd43f30152009-01-16 21:38:53 +0530754 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530755
Sujithd43f30152009-01-16 21:38:53 +0530756 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530757 al += bpad + al_delta;
758
759 /*
760 * Get the delimiters needed to meet the MPDU
761 * density for this node.
762 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100763 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530764 bpad = PADBYTES(al_delta) + (ndelim << 2);
765
766 bf->bf_next = NULL;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400767 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
Sujithe8324352009-01-16 21:38:42 +0530768
Sujithd43f30152009-01-16 21:38:53 +0530769 /* link buffers of this frame to the aggregate */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100770 if (!fi->retries)
771 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithd43f30152009-01-16 21:38:53 +0530772 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
773 list_move_tail(&bf->list, bf_q);
Sujithe8324352009-01-16 21:38:42 +0530774 if (bf_prev) {
775 bf_prev->bf_next = bf;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400776 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
777 bf->bf_daddr);
Sujithe8324352009-01-16 21:38:42 +0530778 }
779 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530780
Sujithe8324352009-01-16 21:38:42 +0530781 } while (!list_empty(&tid->buf_q));
782
Felix Fietkau269c44b2010-11-14 15:20:06 +0100783 *aggr_len = al;
Sujithd43f30152009-01-16 21:38:53 +0530784
Sujithe8324352009-01-16 21:38:42 +0530785 return status;
786#undef PADBYTES
787}
788
789static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
790 struct ath_atx_tid *tid)
791{
Sujithd43f30152009-01-16 21:38:53 +0530792 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +0530793 enum ATH_AGGR_STATUS status;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100794 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530795 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100796 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +0530797
798 do {
799 if (list_empty(&tid->buf_q))
800 return;
801
802 INIT_LIST_HEAD(&bf_q);
803
Felix Fietkau269c44b2010-11-14 15:20:06 +0100804 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530805
806 /*
Sujithd43f30152009-01-16 21:38:53 +0530807 * no frames picked up to be aggregated;
808 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +0530809 */
810 if (list_empty(&bf_q))
811 break;
812
813 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530814 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +0530815
Sujithd43f30152009-01-16 21:38:53 +0530816 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +0100817 if (bf == bf->bf_lastbf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100818 fi = get_frame_info(bf->bf_mpdu);
819
Sujithe8324352009-01-16 21:38:42 +0530820 bf->bf_state.bf_type &= ~BUF_AGGR;
Sujithd43f30152009-01-16 21:38:53 +0530821 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100822 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530823 ath_tx_txqaddbuf(sc, txq, &bf_q);
824 continue;
825 }
826
Sujithd43f30152009-01-16 21:38:53 +0530827 /* setup first desc of aggregate */
Sujithe8324352009-01-16 21:38:42 +0530828 bf->bf_state.bf_type |= BUF_AGGR;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100829 ath_buf_set_rate(sc, bf, aggr_len);
830 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530831
Sujithd43f30152009-01-16 21:38:53 +0530832 /* anchor last desc of aggregate */
833 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530834
Sujithe8324352009-01-16 21:38:42 +0530835 ath_tx_txqaddbuf(sc, txq, &bf_q);
Sujithfec247c2009-07-27 12:08:16 +0530836 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +0530837
Felix Fietkau4b3ba662010-12-17 00:57:00 +0100838 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +0530839 status != ATH_AGGR_BAW_CLOSED);
840}
841
Felix Fietkau231c3a12010-09-20 19:35:28 +0200842int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
843 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530844{
845 struct ath_atx_tid *txtid;
846 struct ath_node *an;
847
848 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530849 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200850
851 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
852 return -EAGAIN;
853
Sujithf83da962009-07-23 15:32:37 +0530854 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200855 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -0700856 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200857
Felix Fietkau2ed72222011-01-10 17:05:49 -0700858 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
859 txtid->baw_head = txtid->baw_tail = 0;
860
Felix Fietkau231c3a12010-09-20 19:35:28 +0200861 return 0;
Sujithe8324352009-01-16 21:38:42 +0530862}
863
Sujithf83da962009-07-23 15:32:37 +0530864void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +0530865{
866 struct ath_node *an = (struct ath_node *)sta->drv_priv;
867 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +0100868 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530869
870 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +0530871 return;
Sujithe8324352009-01-16 21:38:42 +0530872
873 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +0530874 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +0530875 return;
Sujithe8324352009-01-16 21:38:42 +0530876 }
877
Sujithe8324352009-01-16 21:38:42 +0530878 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200879 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200880
881 /*
882 * If frames are still being transmitted for this TID, they will be
883 * cleaned up during tx completion. To prevent race conditions, this
884 * TID can only be reused after all in-progress subframes have been
885 * completed.
886 */
887 if (txtid->baw_head != txtid->baw_tail)
888 txtid->state |= AGGR_CLEANUP;
889 else
890 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +0530891 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530892
Felix Fietkau90fa5392010-09-20 13:45:38 +0200893 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +0530894}
895
896void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
897{
898 struct ath_atx_tid *txtid;
899 struct ath_node *an;
900
901 an = (struct ath_node *)sta->drv_priv;
902
903 if (sc->sc_flags & SC_OP_TXAGGR) {
904 txtid = ATH_AN_2_TID(an, tid);
905 txtid->baw_size =
906 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
907 txtid->state |= AGGR_ADDBA_COMPLETE;
908 txtid->state &= ~AGGR_ADDBA_PROGRESS;
909 ath_tx_resume_tid(sc, txtid);
910 }
911}
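/*
 * The three functions above implement the driver side of the ADDBA
 * lifecycle, presumably driven from the ampdu_action callback:
 * ath_tx_aggr_start() seeds the BAW with the next sequence number and
 * keeps the TID paused while the ADDBA handshake is in flight,
 * ath_tx_aggr_resume() unpauses it and sets the block-ack window size
 * from the peer's ampdu_factor once the session is operational, and
 * ath_tx_aggr_stop() either tears the state down immediately or defers
 * to AGGR_CLEANUP while subframes are still outstanding.
 */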
912
Sujithe8324352009-01-16 21:38:42 +0530913/********************/
914/* Queue Management */
915/********************/
916
Sujithe8324352009-01-16 21:38:42 +0530917static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
918 struct ath_txq *txq)
919{
920 struct ath_atx_ac *ac, *ac_tmp;
921 struct ath_atx_tid *tid, *tid_tmp;
922
923 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
924 list_del(&ac->list);
925 ac->sched = false;
926 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
927 list_del(&tid->list);
928 tid->sched = false;
929 ath_tid_drain(sc, txq, tid);
930 }
931 }
932}
933
934struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
935{
Sujithcbe61d82009-02-09 13:27:12 +0530936 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -0700937 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +0530938 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +0100939 static const int subtype_txq_to_hwq[] = {
940 [WME_AC_BE] = ATH_TXQ_AC_BE,
941 [WME_AC_BK] = ATH_TXQ_AC_BK,
942 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 };
Ben Greear60f2d1d2011-01-09 23:11:52 -0800945 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +0530946
947 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +0100948 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +0530949 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
950 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
951 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
952 qi.tqi_physCompBuf = 0;
953
954 /*
955 * Enable interrupts only for EOL and DESC conditions.
956 * We mark tx descriptors to receive a DESC interrupt
957 * when a tx queue gets deep; otherwise we wait for the
958 * EOL to reap descriptors. Note that this is done to
959 * reduce interrupt load, and it only defers reaping
960 * descriptors, never transmitting frames. Aside from
961 * reducing interrupts this also permits more concurrency.
962 * The only potential downside is if the tx queue backs
963 * up, in which case the top half of the kernel may back up
964 * due to a lack of tx descriptors.
965 *
966 * The UAPSD queue is an exception, since we take a desc-
967 * based intr on the EOSP frames.
968 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -0400969 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
970 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
971 TXQ_FLAG_TXERRINT_ENABLE;
972 } else {
973 if (qtype == ATH9K_TX_QUEUE_UAPSD)
974 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
975 else
976 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
977 TXQ_FLAG_TXDESCINT_ENABLE;
978 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800979 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
980 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +0530981 /*
982 * NB: don't print a message, this happens
983 * normally on parts with too few tx queues
984 */
985 return NULL;
986 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800987 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -0800988 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -0800989 axq_qnum, ARRAY_SIZE(sc->tx.txq));
990 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +0530991 return NULL;
992 }
Ben Greear60f2d1d2011-01-09 23:11:52 -0800993 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
994 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +0530995
Ben Greear60f2d1d2011-01-09 23:11:52 -0800996 txq->axq_qnum = axq_qnum;
997 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +0530998 txq->axq_link = NULL;
999 INIT_LIST_HEAD(&txq->axq_q);
1000 INIT_LIST_HEAD(&txq->axq_acq);
1001 spin_lock_init(&txq->axq_lock);
1002 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001003 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001004 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001005 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001006
1007 txq->txq_headidx = txq->txq_tailidx = 0;
1008 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1009 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1010 INIT_LIST_HEAD(&txq->txq_fifo_pending);
Sujithe8324352009-01-16 21:38:42 +05301011 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001012 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301013}
1014
Sujithe8324352009-01-16 21:38:42 +05301015int ath_txq_update(struct ath_softc *sc, int qnum,
1016 struct ath9k_tx_queue_info *qinfo)
1017{
Sujithcbe61d82009-02-09 13:27:12 +05301018 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301019 int error = 0;
1020 struct ath9k_tx_queue_info qi;
1021
1022 if (qnum == sc->beacon.beaconq) {
1023 /*
1024 * XXX: for beacon queue, we just save the parameter.
1025 * It will be picked up by ath_beaconq_config when
1026 * it's necessary.
1027 */
1028 sc->beacon.beacon_qi = *qinfo;
1029 return 0;
1030 }
1031
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001032 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301033
1034 ath9k_hw_get_txq_props(ah, qnum, &qi);
1035 qi.tqi_aifs = qinfo->tqi_aifs;
1036 qi.tqi_cwmin = qinfo->tqi_cwmin;
1037 qi.tqi_cwmax = qinfo->tqi_cwmax;
1038 qi.tqi_burstTime = qinfo->tqi_burstTime;
1039 qi.tqi_readyTime = qinfo->tqi_readyTime;
1040
1041 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001042 ath_err(ath9k_hw_common(sc->sc_ah),
1043 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301044 error = -EIO;
1045 } else {
1046 ath9k_hw_resettxqueue(ah, qnum);
1047 }
1048
1049 return error;
1050}
1051
1052int ath_cabq_update(struct ath_softc *sc)
1053{
1054 struct ath9k_tx_queue_info qi;
1055 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301056
1057 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1058 /*
1059 * Ensure the readytime % is within the bounds.
1060 */
Sujith17d79042009-02-09 13:27:03 +05301061 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1062 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1063 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1064 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301065
Johannes Berg57c4d7b2009-04-23 16:10:04 +02001066 qi.tqi_readyTime = (sc->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301067 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301068 ath_txq_update(sc, qnum, &qi);
1069
1070 return 0;
1071}
1072
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001073static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1074{
1075 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1076 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1077}
1078
Sujith043a0402009-01-16 21:38:47 +05301079/*
1080 * Drain a given TX queue (could be Beacon or Data)
1081 *
1082 * This assumes output has been stopped and
1083 * we do not need to block ath_tx_tasklet.
1084 */
1085void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301086{
1087 struct ath_buf *bf, *lastbf;
1088 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001089 struct ath_tx_status ts;
1090
1091 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301092 INIT_LIST_HEAD(&bf_head);
1093
Sujithe8324352009-01-16 21:38:42 +05301094 for (;;) {
1095 spin_lock_bh(&txq->axq_lock);
1096
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001097 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1098 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1099 txq->txq_headidx = txq->txq_tailidx = 0;
1100 spin_unlock_bh(&txq->axq_lock);
1101 break;
1102 } else {
1103 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1104 struct ath_buf, list);
1105 }
1106 } else {
1107 if (list_empty(&txq->axq_q)) {
1108 txq->axq_link = NULL;
1109 spin_unlock_bh(&txq->axq_lock);
1110 break;
1111 }
1112 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1113 list);
Sujithe8324352009-01-16 21:38:42 +05301114
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001115 if (bf->bf_stale) {
1116 list_del(&bf->list);
1117 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301118
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001119 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001120 continue;
1121 }
Sujithe8324352009-01-16 21:38:42 +05301122 }
1123
1124 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301125
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001126 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1127 list_cut_position(&bf_head,
1128 &txq->txq_fifo[txq->txq_tailidx],
1129 &lastbf->list);
1130 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1131 } else {
1132 /* remove ath_buf's of the same mpdu from txq */
1133 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1134 }
1135
Sujithe8324352009-01-16 21:38:42 +05301136 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001137 if (bf_is_ampdu_not_probing(bf))
1138 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301139 spin_unlock_bh(&txq->axq_lock);
1140
1141 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001142 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1143 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301144 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001145 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301146 }
1147
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001148 spin_lock_bh(&txq->axq_lock);
1149 txq->axq_tx_inprogress = false;
1150 spin_unlock_bh(&txq->axq_lock);
1151
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001152 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1153 spin_lock_bh(&txq->axq_lock);
1154 while (!list_empty(&txq->txq_fifo_pending)) {
1155 bf = list_first_entry(&txq->txq_fifo_pending,
1156 struct ath_buf, list);
1157 list_cut_position(&bf_head,
1158 &txq->txq_fifo_pending,
1159 &bf->bf_lastbf->list);
1160 spin_unlock_bh(&txq->axq_lock);
1161
1162 if (bf_isampdu(bf))
1163 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001164 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001165 else
1166 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1167 &ts, 0, 0);
1168 spin_lock_bh(&txq->axq_lock);
1169 }
1170 spin_unlock_bh(&txq->axq_lock);
1171 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001172
1173 /* flush any pending frames if aggregation is enabled */
1174 if (sc->sc_flags & SC_OP_TXAGGR) {
1175 if (!retry_tx) {
1176 spin_lock_bh(&txq->axq_lock);
1177 ath_txq_drain_pending_buffers(sc, txq);
1178 spin_unlock_bh(&txq->axq_lock);
1179 }
1180 }
Sujithe8324352009-01-16 21:38:42 +05301181}
1182
Felix Fietkau080e1a22010-12-05 20:17:53 +01001183bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301184{
Sujithcbe61d82009-02-09 13:27:12 +05301185 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001186 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301187 struct ath_txq *txq;
1188 int i, npend = 0;
1189
1190 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001191 return true;
Sujith043a0402009-01-16 21:38:47 +05301192
1193 /* Stop beacon queue */
1194 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1195
1196 /* Stop data queues */
1197 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1198 if (ATH_TXQ_SETUP(sc, i)) {
1199 txq = &sc->tx.txq[i];
1200 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1201 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1202 }
1203 }
1204
Felix Fietkau080e1a22010-12-05 20:17:53 +01001205 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001206 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301207
1208 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001209 if (!ATH_TXQ_SETUP(sc, i))
1210 continue;
1211
1212 /*
1213 * The caller will resume queues with ieee80211_wake_queues.
1214 * Mark the queue as not stopped to prevent ath_tx_complete
1215 * from waking the queue too early.
1216 */
1217 txq = &sc->tx.txq[i];
1218 txq->stopped = false;
1219 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301220 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001221
1222 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301223}
1224
Sujithe8324352009-01-16 21:38:42 +05301225void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1226{
1227 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1228 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1229}
1230
Ben Greear7755bad2011-01-18 17:30:00 -08001231/* For each axq_acq entry, for each tid, try to schedule packets
1232 * for transmission until ampdu_depth has reached the minimum queue depth.
1233 */
Sujithe8324352009-01-16 21:38:42 +05301234void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1235{
Ben Greear7755bad2011-01-18 17:30:00 -08001236 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1237 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301238
Felix Fietkau21f28e62011-01-15 14:30:14 +01001239 if (list_empty(&txq->axq_acq) ||
1240 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301241 return;
1242
1243 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001244 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301245
Ben Greear7755bad2011-01-18 17:30:00 -08001246 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1247 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1248 list_del(&ac->list);
1249 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301250
Ben Greear7755bad2011-01-18 17:30:00 -08001251 while (!list_empty(&ac->tid_q)) {
1252 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1253 list);
1254 list_del(&tid->list);
1255 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301256
Ben Greear7755bad2011-01-18 17:30:00 -08001257 if (tid->paused)
1258 continue;
Sujithe8324352009-01-16 21:38:42 +05301259
Ben Greear7755bad2011-01-18 17:30:00 -08001260 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301261
Ben Greear7755bad2011-01-18 17:30:00 -08001262 /*
1263 * add tid to round-robin queue if more frames
1264 * are pending for the tid
1265 */
1266 if (!list_empty(&tid->buf_q))
1267 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301268
Ben Greear7755bad2011-01-18 17:30:00 -08001269 if (tid == last_tid ||
1270 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1271 break;
Sujithe8324352009-01-16 21:38:42 +05301272 }
Ben Greear7755bad2011-01-18 17:30:00 -08001273
1274 if (!list_empty(&ac->tid_q)) {
1275 if (!ac->sched) {
1276 ac->sched = true;
1277 list_add_tail(&ac->list, &txq->axq_acq);
1278 }
1279 }
1280
1281 if (ac == last_ac ||
1282 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1283 return;
Sujithe8324352009-01-16 21:38:42 +05301284 }
1285}
1286
Sujithe8324352009-01-16 21:38:42 +05301287/***********/
1288/* TX, DMA */
1289/***********/
1290
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001291/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001292 * Insert a chain of ath_buf (descriptors) on a txq and
1293 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 */
Sujith102e0572008-10-29 10:15:16 +05301295static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1296 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001297{
Sujithcbe61d82009-02-09 13:27:12 +05301298 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001299 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001300 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301301
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001302 /*
1303 * Insert the frame on the outbound list and
1304 * pass it on to the hardware.
1305 */
1306
1307 if (list_empty(head))
1308 return;
1309
1310 bf = list_first_entry(head, struct ath_buf, list);
1311
Joe Perches226afe62010-12-02 19:12:37 -08001312 ath_dbg(common, ATH_DBG_QUEUE,
1313 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001314
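	/*
	 * Two hardware paths from here (descriptive note): EDMA-capable
	 * chips (AR9003 and later) push the frame into one of
	 * ATH_TXFIFO_DEPTH FIFO slots, while older chips append it to
	 * axq_q and link it into the descriptor chain through axq_link
	 * before (re)starting DMA with ath9k_hw_txstart().
	 */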
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001315 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1316 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1317 list_splice_tail_init(head, &txq->txq_fifo_pending);
1318 return;
1319 }
1320 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001321 ath_dbg(common, ATH_DBG_XMIT,
1322 "Initializing tx fifo %d which is non-empty\n",
1323 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001324 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1325 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1326 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001327 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001328 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001329 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1330 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001331 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001332 list_splice_tail_init(head, &txq->axq_q);
1333
1334 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001335 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001336 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001337 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1338 txq->axq_qnum, ito64(bf->bf_daddr),
1339 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001340 } else {
1341 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001342 ath_dbg(common, ATH_DBG_XMIT,
1343 "link[%u] (%p)=%llx (%p)\n",
1344 txq->axq_qnum, txq->axq_link,
1345 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001346 }
1347 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1348 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001349 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001350 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001351 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001352 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001353 if (bf_is_ampdu_not_probing(bf))
1354 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001355}
1356
Sujithe8324352009-01-16 21:38:42 +05301357static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001358 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301359{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001360 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001361 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301362
Sujithe8324352009-01-16 21:38:42 +05301363 bf->bf_state.bf_type |= BUF_AMPDU;
1364
1365 /*
1366 * Do not queue to h/w when any of the following conditions is true:
1367 * - there are pending frames in software queue
1368 * - the TID is currently paused for ADDBA/BAR request
1369 * - seqno is not within block-ack window
1370 * - h/w queue depth exceeds low water mark
1371 */
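	/*
	 * BAW_WITHIN() checks the 12-bit sequence-number window; for
	 * illustration, with seq_start = 100 and baw_size = 64, seqnos
	 * 100..163 (mod 4096) fall inside the block-ack window and may be
	 * sent immediately, anything else waits in the software queue.
	 */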
1372 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001373 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001374 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001375 /*
Sujithe8324352009-01-16 21:38:42 +05301376 * Add this frame to the software queue so it can be scheduled
 1377 * for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001378 */
Ben Greearbda8add2011-01-09 23:11:48 -08001379 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001380 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301381 ath_tx_queue_tid(txctl->txq, tid);
1382 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001383 }
1384
Felix Fietkau04caf862010-11-14 15:20:12 +01001385 INIT_LIST_HEAD(&bf_head);
1386 list_add(&bf->list, &bf_head);
1387
Sujithe8324352009-01-16 21:38:42 +05301388 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001389 if (!fi->retries)
1390 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301391
1392 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001393 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301394 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001395 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001396 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301397}
1398
Felix Fietkau82b873a2010-11-11 03:18:37 +01001399static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1400 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001401 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001402{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001403 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301404 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405
Sujithe8324352009-01-16 21:38:42 +05301406 bf = list_first_entry(bf_head, struct ath_buf, list);
1407 bf->bf_state.bf_type &= ~BUF_AMPDU;
1408
1409 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001410 if (tid)
1411 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301412
Sujithd43f30152009-01-16 21:38:53 +05301413 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001414 fi = get_frame_info(bf->bf_mpdu);
1415 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301416 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301417 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001418}
1419
Sujith528f0c62008-10-29 10:14:26 +05301420static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001421{
Sujith528f0c62008-10-29 10:14:26 +05301422 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001423 enum ath9k_pkt_type htype;
1424 __le16 fc;
1425
Sujith528f0c62008-10-29 10:14:26 +05301426 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001427 fc = hdr->frame_control;
1428
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001429 if (ieee80211_is_beacon(fc))
1430 htype = ATH9K_PKT_TYPE_BEACON;
1431 else if (ieee80211_is_probe_resp(fc))
1432 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1433 else if (ieee80211_is_atim(fc))
1434 htype = ATH9K_PKT_TYPE_ATIM;
1435 else if (ieee80211_is_pspoll(fc))
1436 htype = ATH9K_PKT_TYPE_PSPOLL;
1437 else
1438 htype = ATH9K_PKT_TYPE_NORMAL;
1439
1440 return htype;
1441}
1442
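/*
 * Cache per-frame transmit state (hw key index/type, frame length and the
 * assigned sequence number) in the frame's ath_frame_info (see
 * get_frame_info()). For QoS data to an aggregation-capable peer the
 * sequence number is taken from the per-TID counter rather than the one
 * mac80211 assigned.
 */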
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001443static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1444 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301445{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001446 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301447 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001448 struct ieee80211_sta *sta = tx_info->control.sta;
1449 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301450 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001451 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301452 struct ath_node *an;
1453 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001454 enum ath9k_key_type keytype;
1455 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001456 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301457
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001458 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301459
Sujith528f0c62008-10-29 10:14:26 +05301460 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001461 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1462 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001463
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001464 an = (struct ath_node *) sta->drv_priv;
1465 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1466
1467 /*
 1468 * Override the seqno assigned by the upper layer with the one
 1469 * tracked in the TX aggregation state.
1470 */
1471 tid = ATH_AN_2_TID(an, tidno);
1472 seqno = tid->seq_next;
1473 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1474 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1475 }
1476
1477 memset(fi, 0, sizeof(*fi));
1478 if (hw_key)
1479 fi->keyix = hw_key->hw_key_idx;
1480 else
1481 fi->keyix = ATH9K_TXKEYIX_INVALID;
1482 fi->keytype = keytype;
1483 fi->framelen = framelen;
1484 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301485}
1486
Felix Fietkau82b873a2010-11-11 03:18:37 +01001487static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301488{
1489 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1490 int flags = 0;
1491
1492 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1493 flags |= ATH9K_TXDESC_INTREQ;
1494
1495 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1496 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301497
Felix Fietkau82b873a2010-11-11 03:18:37 +01001498 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001499 flags |= ATH9K_TXDESC_LDPC;
1500
Sujith528f0c62008-10-29 10:14:26 +05301501 return flags;
1502}
1503
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505 * rix - rate index
1506 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1507 * width - 0 for 20 MHz, 1 for 40 MHz
 1508 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1509 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001510static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301511 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001512{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001514 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301515
1516 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001517 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001518 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001519 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1521
1522 if (!half_gi)
1523 duration = SYMBOL_TIME(nsymbols);
1524 else
1525 duration = SYMBOL_TIME_HALFGI(nsymbols);
1526
Sujithe63835b2008-11-18 09:07:53 +05301527 /* add up the duration for the legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001528 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301529
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001530 return duration;
1531}
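/*
 * Worked example (illustrative numbers only): rix 0 (MCS0, one stream),
 * pktlen 1500, 20 MHz, full GI:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbits = bits_per_symbol[0][0] * 1 = 26
 *   nsymbols = (12022 + 26 - 1) / 26     = 463
 *   duration = SYMBOL_TIME(463) + 36 us of training/signal fields = 1888 us
 */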
1532
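/*
 * With APM enabled on a 5 GHz channel, transmissions at legacy rates or
 * MCS 0-15 (rate code < 0x90) are reduced from three chains (0x7) to
 * two (0x3); all other cases keep the configured chainmask.
 */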
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301533u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1534{
1535 struct ath_hw *ah = sc->sc_ah;
1536 struct ath9k_channel *curchan = ah->curchan;
1537 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1538 (curchan->channelFlags & CHANNEL_5GHZ) &&
1539 (chainmask == 0x7) && (rate < 0x90))
1540 return 0x3;
1541 else
1542 return chainmask;
1543}
1544
Felix Fietkau269c44b2010-11-14 15:20:06 +01001545static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001546{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001547 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001548 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301549 struct sk_buff *skb;
1550 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301551 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001552 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301553 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301554 int i, flags = 0;
1555 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301556 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301557
1558 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301559
Sujitha22be222009-03-30 15:28:36 +05301560 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301561 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301562 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301563 hdr = (struct ieee80211_hdr *)skb->data;
1564 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301565
Sujithc89424d2009-01-30 14:29:28 +05301566 /*
1567 * We check if Short Preamble is needed for the CTS rate by
1568 * checking the BSS's global flag.
1569 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1570 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001571 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1572 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301573 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001574 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001575
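	/*
	 * Build the 4-entry multi-rate retry series: each entry carries a
	 * rate code, try count, chain selection and flags (RTS/CTS
	 * protection, 40 MHz, short GI), taken from mac80211's
	 * tx_info->control.rates.
	 */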
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001576 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001577 bool is_40, is_sgi, is_sp;
1578 int phy;
1579
Sujithe63835b2008-11-18 09:07:53 +05301580 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581 continue;
1582
Sujitha8efee42008-11-18 09:07:30 +05301583 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301584 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001585
Felix Fietkau27032052010-01-17 21:08:50 +01001586 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1587 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301588 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001589 flags |= ATH9K_TXDESC_RTSENA;
1590 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1591 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1592 flags |= ATH9K_TXDESC_CTSENA;
1593 }
1594
Sujithc89424d2009-01-30 14:29:28 +05301595 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1596 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1597 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1598 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001599
Felix Fietkau545750d2009-11-23 22:21:01 +01001600 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1601 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1602 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1603
1604 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1605 /* MCS rates */
1606 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301607 series[i].ChSel = ath_txchainmask_reduction(sc,
1608 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001609 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001610 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001611 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1612 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001613 continue;
1614 }
1615
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301616 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001617 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1618 !(rate->flags & IEEE80211_RATE_ERP_G))
1619 phy = WLAN_RC_PHY_CCK;
1620 else
1621 phy = WLAN_RC_PHY_OFDM;
1622
1623 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1624 series[i].Rate = rate->hw_value;
1625 if (rate->hw_value_short) {
1626 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1627 series[i].Rate |= rate->hw_value_short;
1628 } else {
1629 is_sp = false;
1630 }
1631
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301632 if (bf->bf_state.bfs_paprd)
1633 series[i].ChSel = common->tx_chainmask;
1634 else
1635 series[i].ChSel = ath_txchainmask_reduction(sc,
1636 common->tx_chainmask, series[i].Rate);
1637
Felix Fietkau545750d2009-11-23 22:21:01 +01001638 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001639 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 }
1641
Felix Fietkau27032052010-01-17 21:08:50 +01001642 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001643 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001644 flags &= ~ATH9K_TXDESC_RTSENA;
1645
1646 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1647 if (flags & ATH9K_TXDESC_RTSENA)
1648 flags &= ~ATH9K_TXDESC_CTSENA;
1649
Sujithe63835b2008-11-18 09:07:53 +05301650 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301651 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1652 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301653 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301654 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301655
Sujith17d79042009-02-09 13:27:03 +05301656 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301657 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001658}
1659
Felix Fietkau82b873a2010-11-11 03:18:37 +01001660static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001661 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001662 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301663{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001664 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001665 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001666 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001667 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001668 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001669 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001670 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001671
1672 bf = ath_tx_get_buffer(sc);
1673 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001674 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001675 return NULL;
1676 }
Sujithe8324352009-01-16 21:38:42 +05301677
Sujithe8324352009-01-16 21:38:42 +05301678 ATH_TXBUF_RESET(bf);
1679
Felix Fietkau82b873a2010-11-11 03:18:37 +01001680 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301681 bf->bf_mpdu = skb;
1682
Ben Greearc1739eb32010-10-14 12:45:29 -07001683 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1684 skb->len, DMA_TO_DEVICE);
1685 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301686 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001687 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001688 ath_err(ath9k_hw_common(sc->sc_ah),
1689 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001690 ath_tx_return_buffer(sc, bf);
1691 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301692 }
1693
Sujithe8324352009-01-16 21:38:42 +05301694 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301695
1696 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001697 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301698
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001699 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1700 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301701
1702 ath9k_hw_filltxdesc(ah, ds,
1703 skb->len, /* segment length */
1704 true, /* first segment */
1705 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001706 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001707 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001708 txq->axq_qnum);
1709
1710
1711 return bf;
1712}
1713
1714/* FIXME: tx power */
1715static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1716 struct ath_tx_control *txctl)
1717{
1718 struct sk_buff *skb = bf->bf_mpdu;
1719 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001721 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001722 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001723 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301724
Sujithe8324352009-01-16 21:38:42 +05301725 spin_lock_bh(&txctl->txq->axq_lock);
1726
Felix Fietkau248a38d2010-12-10 21:16:46 +01001727 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001728 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1729 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001731
Felix Fietkau066dae92010-11-07 14:59:39 +01001732 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001733 }
1734
1735 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001736 /*
1737 * Try aggregation if it's a unicast data frame
1738 * and the destination is HT capable.
1739 */
1740 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301741 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001742 INIT_LIST_HEAD(&bf_head);
1743 list_add_tail(&bf->list, &bf_head);
1744
Felix Fietkau61117f02010-11-11 03:18:36 +01001745 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001746 bf->bf_state.bfs_paprd = txctl->paprd;
1747
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001748 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001749 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1750 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001751
Felix Fietkau248a38d2010-12-10 21:16:46 +01001752 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301753 }
1754
1755 spin_unlock_bh(&txctl->txq->axq_lock);
1756}
1757
 1758/* Upon failure the caller should free the skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001759int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301760 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001761{
Felix Fietkau28d16702010-11-14 15:20:10 +01001762 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1763 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001764 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001765 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001766 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001767 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001768 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001769 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001770 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001771
Ben Greeara9927ba2010-12-06 21:13:49 -08001772 /* NOTE: sta can be NULL according to net/mac80211.h */
1773 if (sta)
1774 txctl->an = (struct ath_node *)sta->drv_priv;
1775
Felix Fietkau04caf862010-11-14 15:20:12 +01001776 if (info->control.hw_key)
1777 frmlen += info->control.hw_key->icv_len;
1778
Felix Fietkau28d16702010-11-14 15:20:10 +01001779 /*
1780 * As a temporary workaround, assign seq# here; this will likely need
1781 * to be cleaned up to work better with Beacon transmission and virtual
1782 * BSSes.
1783 */
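	/*
	 * The sequence number occupies bits 4-15 of seq_ctrl, so bumping
	 * sc->tx.seq_no by 0x10 advances it by one while the fragment
	 * number (bits 0-3, kept via IEEE80211_SCTL_FRAG) is preserved.
	 */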
1784 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1785 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1786 sc->tx.seq_no += 0x10;
1787 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1788 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1789 }
1790
1791 /* Add the padding after the header if this is not already done */
1792 padpos = ath9k_cmn_padpos(hdr->frame_control);
1793 padsize = padpos & 3;
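	/*
	 * Example: a 3-address QoS data header is 26 bytes, giving
	 * padpos = 26 and padsize = 2, so two bytes are inserted after the
	 * header to keep the frame body 4-byte aligned for the hardware.
	 */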
1794 if (padsize && skb->len > padpos) {
1795 if (skb_headroom(skb) < padsize)
1796 return -ENOMEM;
1797
1798 skb_push(skb, padsize);
1799 memmove(skb->data, skb->data + padsize, padpos);
1800 }
1801
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001802 setup_frame_info(hw, skb, frmlen);
1803
1804 /*
1805 * At this point, the vif, hw_key and sta pointers in the tx control
 1806 * info are no longer valid (overwritten by the ath_frame_info data).
1807 */
1808
1809 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001810 if (unlikely(!bf))
1811 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001812
Felix Fietkau066dae92010-11-07 14:59:39 +01001813 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001814 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001815 if (txq == sc->tx.txq_map[q] &&
1816 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001817 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001818 txq->stopped = 1;
1819 }
1820 spin_unlock_bh(&txq->axq_lock);
1821
Sujithe8324352009-01-16 21:38:42 +05301822 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001823
1824 return 0;
1825}
1826
Sujithe8324352009-01-16 21:38:42 +05301827/*****************/
1828/* TX Completion */
1829/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001830
Sujithe8324352009-01-16 21:38:42 +05301831static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001832 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001833{
Sujithe8324352009-01-16 21:38:42 +05301834 struct ieee80211_hw *hw = sc->hw;
1835 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001836 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001837 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001838 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301839
Joe Perches226afe62010-12-02 19:12:37 -08001840 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301841
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301842 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301843 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301844
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301845 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301846 /* Frame was ACKed */
1847 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1848 }
1849
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001850 padpos = ath9k_cmn_padpos(hdr->frame_control);
1851 padsize = padpos & 3;
 1852 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301853 /*
1854 * Remove MAC header padding before giving the frame back to
1855 * mac80211.
1856 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001857 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301858 skb_pull(skb, padsize);
1859 }
1860
Sujith1b04b932010-01-08 10:36:05 +05301861 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1862 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001863 ath_dbg(common, ATH_DBG_PS,
1864 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301865 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1866 PS_WAIT_FOR_CAB |
1867 PS_WAIT_FOR_PSPOLL_DATA |
1868 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001869 }
1870
Felix Fietkau7545daf2011-01-24 19:23:16 +01001871 q = skb_get_queue_mapping(skb);
1872 if (txq == sc->tx.txq_map[q]) {
1873 spin_lock_bh(&txq->axq_lock);
1874 if (WARN_ON(--txq->pending_frames < 0))
1875 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001876
Felix Fietkau7545daf2011-01-24 19:23:16 +01001877 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1878 ieee80211_wake_queue(sc->hw, q);
1879 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001880 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001881 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001882 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001883
1884 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301885}
1886
1887static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001888 struct ath_txq *txq, struct list_head *bf_q,
1889 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301890{
1891 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301892 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301893 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301894
Sujithe8324352009-01-16 21:38:42 +05301895 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301896 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301897
1898 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301900
1901 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301902 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301903 }
1904
Ben Greearc1739eb32010-10-14 12:45:29 -07001905 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001906 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001907
1908 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001909 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001910 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001911 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001912 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001913 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001914 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001915 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001916 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001917 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001918 /* At this point, skb (bf->bf_mpdu) has been consumed, so make sure we don't
1919 * accidentally reference it later.
1920 */
1921 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301922
1923 /*
 1924 * Return the list of ath_buf for this mpdu to the free queue
1925 */
1926 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1927 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1928 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1929}
1930
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001931static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1932 struct ath_tx_status *ts, int nframes, int nbad,
1933 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301934{
Sujitha22be222009-03-30 15:28:36 +05301935 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301936 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301937 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001938 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001939 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301940 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301941
Sujith95e4acb2009-03-13 08:56:09 +05301942 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001943 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301944
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001945 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301946 WARN_ON(tx_rateindex >= hw->max_rates);
1947
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001948 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301949 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001950 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001951 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301952
Felix Fietkaub572d032010-11-14 15:20:07 +01001953 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001954
Felix Fietkaub572d032010-11-14 15:20:07 +01001955 tx_info->status.ampdu_len = nframes;
1956 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001957 }
1958
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001959 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301960 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001961 /*
1962 * If an underrun error is seen assume it as an excessive
1963 * retry only if max frame trigger level has been reached
1964 * (2 KB for single stream, and 4 KB for dual stream).
1965 * Adjust the long retry as if the frame was tried
1966 * hw->max_rate_tries times to affect how rate control updates
1967 * PER for the failed rate.
1968 * In case of congestion on the bus penalizing this type of
1969 * underruns should help hardware actually transmit new frames
1970 * successfully by eventually preferring slower rates.
1971 * This itself should also alleviate congestion on the bus.
1972 */
1973 if (ieee80211_is_data(hdr->frame_control) &&
1974 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1975 ATH9K_TX_DELIM_UNDERRUN)) &&
1976 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1977 tx_info->status.rates[tx_rateindex].count =
1978 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301979 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301980
Felix Fietkau545750d2009-11-23 22:21:01 +01001981 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301982 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001983 tx_info->status.rates[i].idx = -1;
1984 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301985
Felix Fietkau78c46532010-06-25 01:26:16 +02001986 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301987}
1988
Sujithc4288392008-11-18 09:09:30 +05301989static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001990{
Sujithcbe61d82009-02-09 13:27:12 +05301991 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001992 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1994 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301995 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001996 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05301997 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001998 int status;
1999
Joe Perches226afe62010-12-02 19:12:37 -08002000 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2001 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2002 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002003
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004 for (;;) {
2005 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002006 if (list_empty(&txq->axq_q)) {
2007 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002008 if (sc->sc_flags & SC_OP_TXAGGR)
2009 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002010 spin_unlock_bh(&txq->axq_lock);
2011 break;
2012 }
2013 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2014
2015 /*
 2016 * There is a race condition where a BH gets scheduled
 2017 * after sw writes TxE and before hw re-loads the last
2018 * descriptor to get the newly chained one.
2019 * Software must keep the last DONE descriptor as a
2020 * holding descriptor - software does so by marking
2021 * it with the STALE flag.
2022 */
2023 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302024 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002025 bf_held = bf;
2026 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302027 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002028 break;
2029 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002030 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302031 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002032 }
2033 }
2034
2035 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302036 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002037
Felix Fietkau29bffa92010-03-29 20:14:23 -07002038 memset(&ts, 0, sizeof(ts));
2039 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040 if (status == -EINPROGRESS) {
2041 spin_unlock_bh(&txq->axq_lock);
2042 break;
2043 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002044 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002045
2046 /*
 2047 * Remove the ath_bufs of the same transmit unit from txq,
 2048 * but leave the last descriptor behind as the holding
 2049 * descriptor for hw.
2050 */
Sujitha119cc42009-03-30 15:28:38 +05302051 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002052 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053 if (!list_is_singular(&lastbf->list))
2054 list_cut_position(&bf_head,
2055 &txq->axq_q, lastbf->list.prev);
2056
2057 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002058 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002059 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002060 if (bf_held)
2061 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002062
2063 if (bf_is_ampdu_not_probing(bf))
2064 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002065 spin_unlock_bh(&txq->axq_lock);
2066
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002067 if (bf_held)
2068 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069
Sujithcd3d39a2008-08-11 14:03:34 +05302070 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071 /*
2072 * This frame is sent out as a single frame.
2073 * Use hardware retry status for this frame.
2074 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002075 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302076 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002077 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078 }
Johannes Berge6a98542008-10-21 12:40:02 +02002079
Sujithcd3d39a2008-08-11 14:03:34 +05302080 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002081 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2082 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002084 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002087
Sujith672840a2008-08-11 14:05:08 +05302088 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 ath_txq_schedule(sc, txq);
2090 spin_unlock_bh(&txq->axq_lock);
2091 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092}
2093
Vivek Natarajan181fb182011-01-27 14:45:08 +05302094static void ath_hw_pll_work(struct work_struct *work)
2095{
2096 struct ath_softc *sc = container_of(work, struct ath_softc,
2097 hw_pll_work.work);
2098 static int count;
2099
2100 if (AR_SREV_9485(sc->sc_ah)) {
2101 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2102 count++;
2103
2104 if (count == 3) {
2105 /* Rx is hung for more than 500ms. Reset it */
2106 ath_reset(sc, true);
2107 count = 0;
2108 }
2109 } else
2110 count = 0;
2111
2112 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2113 }
2114}
2115
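/*
 * TX watchdog: runs every ATH_TX_COMPLETE_POLL_INT ms. A queue that still
 * has axq_tx_inprogress set from the previous pass made no completion
 * progress, so the chip is assumed to be hung and is reset.
 */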
Sujith305fe472009-07-23 15:32:29 +05302116static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002117{
2118 struct ath_softc *sc = container_of(work, struct ath_softc,
2119 tx_complete_work.work);
2120 struct ath_txq *txq;
2121 int i;
2122 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002123#ifdef CONFIG_ATH9K_DEBUGFS
2124 sc->tx_complete_poll_work_seen++;
2125#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002126
2127 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2128 if (ATH_TXQ_SETUP(sc, i)) {
2129 txq = &sc->tx.txq[i];
2130 spin_lock_bh(&txq->axq_lock);
2131 if (txq->axq_depth) {
2132 if (txq->axq_tx_inprogress) {
2133 needreset = true;
2134 spin_unlock_bh(&txq->axq_lock);
2135 break;
2136 } else {
2137 txq->axq_tx_inprogress = true;
2138 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002139 } else {
2140 /* If the queue has pending buffers, then it
2141 * should be doing tx work (and have axq_depth).
2142 * Shouldn't get to this state I think..but
2143 * we do.
2144 */
2145 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2146 (txq->pending_frames > 0 ||
2147 !list_empty(&txq->axq_acq) ||
2148 txq->stopped)) {
2149 ath_err(ath9k_hw_common(sc->sc_ah),
2150 "txq: %p axq_qnum: %u,"
2151 " mac80211_qnum: %i"
2152 " axq_link: %p"
2153 " pending frames: %i"
2154 " axq_acq empty: %i"
2155 " stopped: %i"
2156 " axq_depth: 0 Attempting to"
2157 " restart tx logic.\n",
2158 txq, txq->axq_qnum,
2159 txq->mac80211_qnum,
2160 txq->axq_link,
2161 txq->pending_frames,
2162 list_empty(&txq->axq_acq),
2163 txq->stopped);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002164 ath_txq_schedule(sc, txq);
2165 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002166 }
2167 spin_unlock_bh(&txq->axq_lock);
2168 }
2169
2170 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002171 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2172 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302173 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002174 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302175 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002176 }
2177
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002178 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002179 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2180}
2181
2182
Sujithe8324352009-01-16 21:38:42 +05302183
2184void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185{
Sujithe8324352009-01-16 21:38:42 +05302186 int i;
2187 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002188
Sujithe8324352009-01-16 21:38:42 +05302189 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002190
2191 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302192 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2193 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194 }
2195}
2196
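/*
 * EDMA completion path: the hardware writes TX status into the dedicated
 * status ring, so ath9k_hw_txprocdesc() is called with a NULL descriptor
 * and the completed queue is identified by txs.qid instead of walking
 * per-queue descriptor lists.
 */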
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002197void ath_tx_edma_tasklet(struct ath_softc *sc)
2198{
2199 struct ath_tx_status txs;
2200 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2201 struct ath_hw *ah = sc->sc_ah;
2202 struct ath_txq *txq;
2203 struct ath_buf *bf, *lastbf;
2204 struct list_head bf_head;
2205 int status;
2206 int txok;
2207
2208 for (;;) {
2209 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2210 if (status == -EINPROGRESS)
2211 break;
2212 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002213 ath_dbg(common, ATH_DBG_XMIT,
2214 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002215 break;
2216 }
2217
2218 /* Skip beacon completions */
2219 if (txs.qid == sc->beacon.beaconq)
2220 continue;
2221
2222 txq = &sc->tx.txq[txs.qid];
2223
2224 spin_lock_bh(&txq->axq_lock);
2225 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2226 spin_unlock_bh(&txq->axq_lock);
2227 return;
2228 }
2229
2230 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2231 struct ath_buf, list);
2232 lastbf = bf->bf_lastbf;
2233
2234 INIT_LIST_HEAD(&bf_head);
2235 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2236 &lastbf->list);
2237 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2238 txq->axq_depth--;
2239 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002240 if (bf_is_ampdu_not_probing(bf))
2241 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242 spin_unlock_bh(&txq->axq_lock);
2243
2244 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2245
2246 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002247 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2248 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002249 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002250 }
2251
2252 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002253 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2254 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002255 else
2256 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2257 &txs, txok, 0);
2258
2259 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002260
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261 if (!list_empty(&txq->txq_fifo_pending)) {
2262 INIT_LIST_HEAD(&bf_head);
2263 bf = list_first_entry(&txq->txq_fifo_pending,
2264 struct ath_buf, list);
2265 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2266 &bf->bf_lastbf->list);
2267 ath_tx_txqaddbuf(sc, txq, &bf_head);
2268 } else if (sc->sc_flags & SC_OP_TXAGGR)
2269 ath_txq_schedule(sc, txq);
2270 spin_unlock_bh(&txq->axq_lock);
2271 }
2272}
2273
Sujithe8324352009-01-16 21:38:42 +05302274/*****************/
2275/* Init, Cleanup */
2276/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002277
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002278static int ath_txstatus_setup(struct ath_softc *sc, int size)
2279{
2280 struct ath_descdma *dd = &sc->txsdma;
2281 u8 txs_len = sc->sc_ah->caps.txs_len;
2282
2283 dd->dd_desc_len = size * txs_len;
2284 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2285 &dd->dd_desc_paddr, GFP_KERNEL);
2286 if (!dd->dd_desc)
2287 return -ENOMEM;
2288
2289 return 0;
2290}
2291
2292static int ath_tx_edma_init(struct ath_softc *sc)
2293{
2294 int err;
2295
2296 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2297 if (!err)
2298 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2299 sc->txsdma.dd_desc_paddr,
2300 ATH_TXSTATUS_RING_SIZE);
2301
2302 return err;
2303}
2304
2305static void ath_tx_edma_cleanup(struct ath_softc *sc)
2306{
2307 struct ath_descdma *dd = &sc->txsdma;
2308
2309 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2310 dd->dd_desc_paddr);
2311}
2312
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313int ath_tx_init(struct ath_softc *sc, int nbufs)
2314{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002315 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316 int error = 0;
2317
Sujith797fe5cb2009-03-30 15:28:45 +05302318 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002319
Sujith797fe5cb2009-03-30 15:28:45 +05302320 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002321 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302322 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002323 ath_err(common,
2324 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302325 goto err;
2326 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002327
Sujith797fe5cb2009-03-30 15:28:45 +05302328 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002329 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302330 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002331 ath_err(common,
2332 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302333 goto err;
2334 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002336 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302337 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002338
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002339 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2340 error = ath_tx_edma_init(sc);
2341 if (error)
2342 goto err;
2343 }
2344
Sujith797fe5cb2009-03-30 15:28:45 +05302345err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002346 if (error != 0)
2347 ath_tx_cleanup(sc);
2348
2349 return error;
2350}
2351
Sujith797fe5cb2009-03-30 15:28:45 +05302352void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002353{
Sujithb77f4832008-12-07 21:44:03 +05302354 if (sc->beacon.bdma.dd_desc_len != 0)
2355 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356
Sujithb77f4832008-12-07 21:44:03 +05302357 if (sc->tx.txdma.dd_desc_len != 0)
2358 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002359
2360 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2361 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362}
2363
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2365{
Sujithc5170162008-10-29 10:13:59 +05302366 struct ath_atx_tid *tid;
2367 struct ath_atx_ac *ac;
2368 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369
Sujith8ee5afb2008-12-07 21:43:36 +05302370 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302371 tidno < WME_NUM_TID;
2372 tidno++, tid++) {
2373 tid->an = an;
2374 tid->tidno = tidno;
2375 tid->seq_start = tid->seq_next = 0;
2376 tid->baw_size = WME_MAX_BA;
2377 tid->baw_head = tid->baw_tail = 0;
2378 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302379 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302380 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302381 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302382 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302383 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302384 tid->state &= ~AGGR_ADDBA_COMPLETE;
2385 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302386 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387
Sujith8ee5afb2008-12-07 21:43:36 +05302388 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302389 acno < WME_NUM_AC; acno++, ac++) {
2390 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002391 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302392 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393 }
2394}
2395
Sujithb5aa9bf2008-10-29 10:13:31 +05302396void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397{
Felix Fietkau2b409942010-07-07 19:42:08 +02002398 struct ath_atx_ac *ac;
2399 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002400 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002401 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302402
Felix Fietkau2b409942010-07-07 19:42:08 +02002403 for (tidno = 0, tid = &an->tid[tidno];
2404 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405
Felix Fietkau2b409942010-07-07 19:42:08 +02002406 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002407 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002408
Felix Fietkau2b409942010-07-07 19:42:08 +02002409 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410
Felix Fietkau2b409942010-07-07 19:42:08 +02002411 if (tid->sched) {
2412 list_del(&tid->list);
2413 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002414 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002415
2416 if (ac->sched) {
2417 list_del(&ac->list);
2418 tid->ac->sched = false;
2419 }
2420
2421 ath_tid_drain(sc, txq, tid);
2422 tid->state &= ~AGGR_ADDBA_COMPLETE;
2423 tid->state &= ~AGGR_CLEANUP;
2424
2425 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426 }
2427}