/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

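/*
 * Illustrative arithmetic on the macros above (a sketch, not used by the
 * code): one OFDM symbol lasts 4 us with the long GI and 3.6 us with the
 * short GI, so SYMBOL_TIME(10) = 40 us and SYMBOL_TIME_HALFGI(10) = 36 us,
 * while an 8 us window holds NUM_SYMBOLS_PER_USEC(8) =
 * NUM_SYMBOLS_PER_USEC_HALFGI(8) = 2 symbols. The half-GI form rounds
 * down; ath_compute_num_delims() below bumps a result of 0 up to 1.
 */
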
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
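/*
 * Example lookup (worked from the table and HT_RC_2_STREAMS() above): for
 * HT MCS 12 (two-stream 16-QAM 3/4), HT_RC_2_STREAMS(12) = 2 and
 * bits_per_symbol[12 % 8][0] = 156, so a 20 MHz symbol carries
 * 156 * 2 = 312 data bits. ath_compute_num_delims() uses exactly this
 * indexing.
 */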

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
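/*
 * Each entry above is the largest frame/aggregate size in bytes that fits
 * in roughly a 4 ms transmit duration at the corresponding MCS index,
 * clamped at 65532; ath_lookup_rate() picks the smallest such value
 * across the frame's rate series.
 */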

/*********************/
/* Aggregation logic */
/*********************/

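/*
 * Two-level scheduling: a TID with pending frames is linked onto its
 * access category (AC), and the AC in turn onto the hardware queue's
 * round-robin list. Paused or already-scheduled entries are left alone.
 */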
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

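/*
 * Per-frame driver state (seqno, retry count, frame length, key index) is
 * kept in the rate_driver_data scratch space of the mac80211 tx info; the
 * BUILD_BUG_ON below guards against it outgrowing that area.
 */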
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

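/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap of
 * ATH_TID_MAX_BUFS slots, with tid->seq_start mapped to slot baw_head.
 * As a sketch (assuming ATH_BA_INDEX() is the modular sequence-space
 * offset): with seq_start = 100 and baw_head = 5, sequence number 103 has
 * index 3 and lands in slot (5 + 3) & (ATH_TID_MAX_BUFS - 1). Completing
 * a frame clears its bit and slides the window start forward past any
 * leading completed slots.
 */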
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

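/*
 * Tx-status handling for an aggregate: walk every subframe, count those
 * covered by the block-ack bitmap as acked, software-retry the rest (up
 * to ATH_MAX_SW_RETRIES) by splicing them back onto the TID queue, and
 * report the outcome to the rate control code once per aggregate.
 */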
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

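/*
 * Derive the per-aggregate byte limit from the frame's rate series. For
 * instance, if the slowest usable rate in the series is MCS 4 at
 * HT20/long GI, ath_max_4ms_framelen[] gives 19300 bytes; that value is
 * then clamped to ATH_AMPDU_LIMIT_MAX, scaled to 3/8 when BT coex
 * priority is active, and finally capped by the peer's advertised
 * maximum A-MPDU size.
 */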
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

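/*
 * Worked example for the delimiter math below (illustrative; assumes the
 * usual 4-byte MPDU delimiter, i.e. ATH_AGGR_DELIM_SZ == 4): with an 8 us
 * MPDU density, long GI and a single-stream MCS 7 first rate at 20 MHz,
 * nsymbols = 2 and nsymbits = 260, so minlen = (2 * 260) / 8 = 65 bytes;
 * a 40-byte subframe then needs max((65 - 40) / 4, ndelim) >= 6 extra
 * delimiters.
 */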
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

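/*
 * Build one A-MPDU from the head of the TID queue: subframes are pulled
 * until the block-ack window would be overstepped, the rate-derived byte
 * limit or the subframe limit (half the BAW) is reached, or a
 * probe/legacy first rate is encountered.
 */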
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

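/*
 * Keep forming and queueing aggregates for this TID until the hardware
 * queue is sufficiently full (ATH_AGGR_MIN_QDEPTH) or the block-ack
 * window closes; a single-frame "aggregate" is downgraded to a plain
 * frame before being handed to the hardware.
 */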
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

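/*
 * ADDBA setup: pause the TID, reset its block-ack window state and hand
 * the starting sequence number back to mac80211; the TID is unpaused from
 * ath_tx_aggr_resume() once the ADDBA exchange has completed.
 */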
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

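/*
 * Abort DMA on every active hardware queue and drain the corresponding
 * software queues; returns true only if the hardware reports no frames
 * still pending on any queue.
 */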
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

Sujithe8324352009-01-16 21:38:42 +05301289/***********/
1290/* TX, DMA */
1291/***********/
1292
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001293/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 * Insert a chain of ath_buf (descriptors) on a txq; the caller
1295 * must have already chained the descriptors together.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001296 */
Sujith102e0572008-10-29 10:15:16 +05301297static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1298 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001299{
Sujithcbe61d82009-02-09 13:27:12 +05301300 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001301 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001302 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301303
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001304 /*
1305 * Insert the frame on the outbound list and
1306 * pass it on to the hardware.
1307 */
1308
1309 if (list_empty(head))
1310 return;
1311
1312 bf = list_first_entry(head, struct ath_buf, list);
1313
Joe Perches226afe62010-12-02 19:12:37 -08001314 ath_dbg(common, ATH_DBG_QUEUE,
1315 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001316
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001317 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1318 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1319 list_splice_tail_init(head, &txq->txq_fifo_pending);
1320 return;
1321 }
1322 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001323 ath_dbg(common, ATH_DBG_XMIT,
1324 "Initializing tx fifo %d which is non-empty\n",
1325 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001326 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1327 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1328 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001329 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001330 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001331 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1332 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001333 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001334 list_splice_tail_init(head, &txq->axq_q);
1335
1336 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001337 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001338 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001339 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1340 txq->axq_qnum, ito64(bf->bf_daddr),
1341 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001342 } else {
1343 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001344 ath_dbg(common, ATH_DBG_XMIT,
1345 "link[%u] (%p)=%llx (%p)\n",
1346 txq->axq_qnum, txq->axq_link,
1347 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001348 }
1349 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1350 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001351 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001352 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001353 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001354 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001355 if (bf_is_ampdu_not_probing(bf))
1356 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001357}
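/*
 * ath_tx_txqaddbuf() covers both DMA models: on EDMA-capable hardware the
 * chain goes into one of the ATH_TXFIFO_DEPTH FIFO slots (or waits on
 * txq_fifo_pending when the FIFO is full), while on older MACs it is
 * appended to axq_q and either written to TXDP directly or linked behind
 * the previous descriptor before ath9k_hw_txstart() kicks the queue.
 */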
1358
Sujithe8324352009-01-16 21:38:42 +05301359static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001360 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301361{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001362 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001363 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301364
Sujithe8324352009-01-16 21:38:42 +05301365 bf->bf_state.bf_type |= BUF_AMPDU;
1366
1367 /*
1368 * Do not queue to h/w when any of the following conditions is true:
1369 * - there are pending frames in software queue
1370 * - the TID is currently paused for ADDBA/BAR request
1371 * - seqno is not within block-ack window
1372 * - h/w queue depth exceeds low water mark
1373 */
1374 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001375 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001376 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001377 /*
Sujithe8324352009-01-16 21:38:42 +05301378 * Add this frame to software queue for scheduling later
1379 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001380 */
Ben Greearbda8add2011-01-09 23:11:48 -08001381 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001382 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301383 ath_tx_queue_tid(txctl->txq, tid);
1384 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001385 }
1386
Felix Fietkau04caf862010-11-14 15:20:12 +01001387 INIT_LIST_HEAD(&bf_head);
1388 list_add(&bf->list, &bf_head);
1389
Sujithe8324352009-01-16 21:38:42 +05301390 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001391 if (!fi->retries)
1392 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301393
1394 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001395 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301396 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001397 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001398 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301399}
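/*
 * A frame that passes the checks above is pushed to the hardware right away
 * as a single MPDU; it is still added to the block-ack window (a retried
 * frame is already in it), so later aggregates and the receiver's reorder
 * window stay consistent.
 */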
1400
Felix Fietkau82b873a2010-11-11 03:18:37 +01001401static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1402 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001403 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001404{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001405 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301406 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001407
Sujithe8324352009-01-16 21:38:42 +05301408 bf = list_first_entry(bf_head, struct ath_buf, list);
1409 bf->bf_state.bf_type &= ~BUF_AMPDU;
1410
1411 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001412 if (tid)
1413 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301414
Sujithd43f30152009-01-16 21:38:53 +05301415 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001416 fi = get_frame_info(bf->bf_mpdu);
1417 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301418 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301419 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001420}
1421
Sujith528f0c62008-10-29 10:14:26 +05301422static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001423{
Sujith528f0c62008-10-29 10:14:26 +05301424 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001425 enum ath9k_pkt_type htype;
1426 __le16 fc;
1427
Sujith528f0c62008-10-29 10:14:26 +05301428 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001429 fc = hdr->frame_control;
1430
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001431 if (ieee80211_is_beacon(fc))
1432 htype = ATH9K_PKT_TYPE_BEACON;
1433 else if (ieee80211_is_probe_resp(fc))
1434 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1435 else if (ieee80211_is_atim(fc))
1436 htype = ATH9K_PKT_TYPE_ATIM;
1437 else if (ieee80211_is_pspoll(fc))
1438 htype = ATH9K_PKT_TYPE_PSPOLL;
1439 else
1440 htype = ATH9K_PKT_TYPE_NORMAL;
1441
1442 return htype;
1443}
1444
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001445static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1446 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301447{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001448 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301449 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001450 struct ieee80211_sta *sta = tx_info->control.sta;
1451 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301452 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001453 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301454 struct ath_node *an;
1455 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001456 enum ath9k_key_type keytype;
1457 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001458 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301459
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001460 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301461
Sujith528f0c62008-10-29 10:14:26 +05301462 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001463 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1464 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001465
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001466 an = (struct ath_node *) sta->drv_priv;
1467 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1468
1469 /*
1470 * Override seqno set by upper layer with the one
1471 * in tx aggregation state.
1472 */
1473 tid = ATH_AN_2_TID(an, tidno);
1474 seqno = tid->seq_next;
1475 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1476 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1477 }
1478
1479 memset(fi, 0, sizeof(*fi));
1480 if (hw_key)
1481 fi->keyix = hw_key->hw_key_idx;
1482 else
1483 fi->keyix = ATH9K_TXKEYIX_INVALID;
1484 fi->keytype = keytype;
1485 fi->framelen = framelen;
1486 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301487}
1488
Felix Fietkau82b873a2010-11-11 03:18:37 +01001489static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301490{
1491 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1492 int flags = 0;
1493
1494 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1495 flags |= ATH9K_TXDESC_INTREQ;
1496
1497 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1498 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301499
Felix Fietkau82b873a2010-11-11 03:18:37 +01001500 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001501 flags |= ATH9K_TXDESC_LDPC;
1502
Sujith528f0c62008-10-29 10:14:26 +05301503 return flags;
1504}
1505
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001507 * rix - rate index
1508 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1509 * width - 0 for 20 MHz, 1 for 40 MHz
1510 * half_gi - use a 3.6 us symbol time instead of 4 us
1511 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001512static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301513 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001514{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001515 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001516 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301517
1518 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001519 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001521 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1523
1524 if (!half_gi)
1525 duration = SYMBOL_TIME(nsymbols);
1526 else
1527 duration = SYMBOL_TIME_HALFGI(nsymbols);
1528
Sujithe63835b2008-11-18 09:07:53 +05301529 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001530 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301531
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001532 return duration;
1533}
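/*
 * Worked example (illustrative only): a 1500-byte MPDU at MCS 7, 20 MHz,
 * long GI: streams = 1, nbits = 1500 * 8 + 22 = 12022, nsymbits = 260,
 * nsymbols = 47, so the data portion lasts 47 * 4 = 188 us; adding
 * L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us gives a
 * PktDuration of 224 us.
 */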
1534
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301535u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1536{
1537 struct ath_hw *ah = sc->sc_ah;
1538 struct ath9k_channel *curchan = ah->curchan;
1539 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1540 (curchan->channelFlags & CHANNEL_5GHZ) &&
1541 (chainmask == 0x7) && (rate < 0x90))
1542 return 0x3;
1543 else
1544 return chainmask;
1545}
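/*
 * Example: with SC_OP_ENABLE_APM set on a 5 GHz channel, a frame queued at
 * a rate below 0x90 (legacy rates and MCS 0-15) with the full chainmask 0x7
 * is sent on two chains (0x3); every other combination keeps the configured
 * chainmask unchanged.
 */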
1546
Felix Fietkau269c44b2010-11-14 15:20:06 +01001547static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001548{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001549 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301551 struct sk_buff *skb;
1552 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301553 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001554 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301555 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301556 int i, flags = 0;
1557 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301558 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301559
1560 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301561
Sujitha22be222009-03-30 15:28:36 +05301562 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301563 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301564 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301565 hdr = (struct ieee80211_hdr *)skb->data;
1566 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301567
Sujithc89424d2009-01-30 14:29:28 +05301568 /*
1569 * We check if Short Preamble is needed for the CTS rate by
1570 * checking the BSS's global flag.
1571 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1572 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001573 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1574 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301575 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001576 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001577
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001578 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001579 bool is_40, is_sgi, is_sp;
1580 int phy;
1581
Sujithe63835b2008-11-18 09:07:53 +05301582 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583 continue;
1584
Sujitha8efee42008-11-18 09:07:30 +05301585 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301586 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001587
Felix Fietkau27032052010-01-17 21:08:50 +01001588 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1589 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301590 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001591 flags |= ATH9K_TXDESC_RTSENA;
1592 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1593 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1594 flags |= ATH9K_TXDESC_CTSENA;
1595 }
1596
Sujithc89424d2009-01-30 14:29:28 +05301597 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1598 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1599 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1600 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601
Felix Fietkau545750d2009-11-23 22:21:01 +01001602 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1603 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1604 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1605
1606 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1607 /* MCS rates */
1608 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301609 series[i].ChSel = ath_txchainmask_reduction(sc,
1610 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001611 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001612 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001613 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1614 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001615 continue;
1616 }
1617
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301618 /* legacy rates */
1619 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1620 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1621 !(rate->flags & IEEE80211_RATE_ERP_G))
1622 phy = WLAN_RC_PHY_CCK;
1623 else
1624 phy = WLAN_RC_PHY_OFDM;
1625
1626 series[i].Rate = rate->hw_value;
1627 if (rate->hw_value_short) {
1628 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1629 series[i].Rate |= rate->hw_value_short;
1630 } else {
1631 is_sp = false;
1632 }
1633
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301634 if (bf->bf_state.bfs_paprd)
1635 series[i].ChSel = common->tx_chainmask;
1636 else
1637 series[i].ChSel = ath_txchainmask_reduction(sc,
1638 common->tx_chainmask, series[i].Rate);
1639
Felix Fietkau545750d2009-11-23 22:21:01 +01001640 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001641 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001642 }
1643
Felix Fietkau27032052010-01-17 21:08:50 +01001644 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001645 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001646 flags &= ~ATH9K_TXDESC_RTSENA;
1647
1648 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1649 if (flags & ATH9K_TXDESC_RTSENA)
1650 flags &= ~ATH9K_TXDESC_CTSENA;
1651
Sujithe63835b2008-11-18 09:07:53 +05301652 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301653 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1654 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301655 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301656 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301657
Sujith17d79042009-02-09 13:27:03 +05301658 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301659 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001660}
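/*
 * ath_buf_set_rate() translates the (up to four) mac80211 rate-control
 * entries into the hardware multi-rate-retry series: HT rates are encoded
 * as (MCS index | 0x80) with a computed PktDuration, legacy rates use the
 * band's hw_value plus ath9k_hw_computetxtime(), and the RTS/CTS protection
 * flags are resolved so that only one of RTSENA/CTSENA remains set.
 */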
1661
Felix Fietkau82b873a2010-11-11 03:18:37 +01001662static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001663 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001664 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301665{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001666 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001667 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001668 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001669 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001670 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001671 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001672 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001673
1674 bf = ath_tx_get_buffer(sc);
1675 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001676 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001677 return NULL;
1678 }
Sujithe8324352009-01-16 21:38:42 +05301679
Sujithe8324352009-01-16 21:38:42 +05301680 ATH_TXBUF_RESET(bf);
1681
Felix Fietkau82b873a2010-11-11 03:18:37 +01001682 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301683 bf->bf_mpdu = skb;
1684
Ben Greearc1739eb32010-10-14 12:45:29 -07001685 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1686 skb->len, DMA_TO_DEVICE);
1687 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301688 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001689 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001690 ath_err(ath9k_hw_common(sc->sc_ah),
1691 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001692 ath_tx_return_buffer(sc, bf);
1693 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301694 }
1695
Sujithe8324352009-01-16 21:38:42 +05301696 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301697
1698 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001699 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301700
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001701 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1702 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301703
1704 ath9k_hw_filltxdesc(ah, ds,
1705 skb->len, /* segment length */
1706 true, /* first segment */
1707 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001708 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001709 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001710 txq->axq_qnum);
1711
1712
1713 return bf;
1714}
1715
1716/* FIXME: tx power */
1717static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1718 struct ath_tx_control *txctl)
1719{
1720 struct sk_buff *skb = bf->bf_mpdu;
1721 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1722 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001723 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001724 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301726
Sujithe8324352009-01-16 21:38:42 +05301727 spin_lock_bh(&txctl->txq->axq_lock);
1728
Felix Fietkau248a38d2010-12-10 21:16:46 +01001729 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001730 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1731 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001732 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001733
Felix Fietkau066dae92010-11-07 14:59:39 +01001734 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001735 }
1736
1737 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001738 /*
1739 * Try aggregation if it's a unicast data frame
1740 * and the destination is HT capable.
1741 */
1742 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301743 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001744 INIT_LIST_HEAD(&bf_head);
1745 list_add_tail(&bf->list, &bf_head);
1746
Felix Fietkau61117f02010-11-11 03:18:36 +01001747 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001748 bf->bf_state.bfs_paprd = txctl->paprd;
1749
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001750 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001751 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1752 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001753
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301754 if (txctl->paprd)
1755 bf->bf_state.bfs_paprd_timestamp = jiffies;
1756
Felix Fietkau248a38d2010-12-10 21:16:46 +01001757 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301758 }
1759
1760 spin_unlock_bh(&txctl->txq->axq_lock);
1761}
1762
1763/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001764int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301765 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001766{
Felix Fietkau28d16702010-11-14 15:20:10 +01001767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1768 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001769 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001770 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001771 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001772 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001773 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001774 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001775 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001776
Ben Greeara9927ba2010-12-06 21:13:49 -08001777 /* NOTE: sta can be NULL according to net/mac80211.h */
1778 if (sta)
1779 txctl->an = (struct ath_node *)sta->drv_priv;
1780
Felix Fietkau04caf862010-11-14 15:20:12 +01001781 if (info->control.hw_key)
1782 frmlen += info->control.hw_key->icv_len;
1783
Felix Fietkau28d16702010-11-14 15:20:10 +01001784 /*
1785 * As a temporary workaround, assign seq# here; this will likely need
1786 * to be cleaned up to work better with Beacon transmission and virtual
1787 * BSSes.
1788 */
1789 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1790 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1791 sc->tx.seq_no += 0x10;
1792 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1793 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1794 }
1795
1796 /* Add the padding after the header if this is not already done */
1797 padpos = ath9k_cmn_padpos(hdr->frame_control);
1798 padsize = padpos & 3;
1799 if (padsize && skb->len > padpos) {
1800 if (skb_headroom(skb) < padsize)
1801 return -ENOMEM;
1802
1803 skb_push(skb, padsize);
1804 memmove(skb->data, skb->data + padsize, padpos);
1805 }
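	/*
	 * Illustration of the padding math above: ath9k_cmn_padpos() returns
	 * the 802.11 header length, so a 26-byte QoS data header gives
	 * padsize = 26 & 3 = 2 and two pad bytes are inserted so that the
	 * payload following the header stays 4-byte aligned.
	 */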
1806
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001807 setup_frame_info(hw, skb, frmlen);
1808
1809 /*
1810 * At this point, the vif, hw_key and sta pointers in the tx control
1811 * info are no longer valid (overwritten by the ath_frame_info data).
1812 */
1813
1814 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001815 if (unlikely(!bf))
1816 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001817
Felix Fietkau066dae92010-11-07 14:59:39 +01001818 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001819 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001820 if (txq == sc->tx.txq_map[q] &&
1821 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001822 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001823 txq->stopped = 1;
1824 }
1825 spin_unlock_bh(&txq->axq_lock);
1826
Sujithe8324352009-01-16 21:38:42 +05301827 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001828
1829 return 0;
1830}
1831
Sujithe8324352009-01-16 21:38:42 +05301832/*****************/
1833/* TX Completion */
1834/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001835
Sujithe8324352009-01-16 21:38:42 +05301836static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001837 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001838{
Sujithe8324352009-01-16 21:38:42 +05301839 struct ieee80211_hw *hw = sc->hw;
1840 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001841 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001842 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001843 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301844
Joe Perches226afe62010-12-02 19:12:37 -08001845 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301846
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301847 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301848 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301849
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301850 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301851 /* Frame was ACKed */
1852 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1853 }
1854
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001855 padpos = ath9k_cmn_padpos(hdr->frame_control);
1856 padsize = padpos & 3;
1857 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301858 /*
1859 * Remove MAC header padding before giving the frame back to
1860 * mac80211.
1861 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001862 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301863 skb_pull(skb, padsize);
1864 }
1865
Sujith1b04b932010-01-08 10:36:05 +05301866 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1867 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001868 ath_dbg(common, ATH_DBG_PS,
1869 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301870 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1871 PS_WAIT_FOR_CAB |
1872 PS_WAIT_FOR_PSPOLL_DATA |
1873 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001874 }
1875
Felix Fietkau7545daf2011-01-24 19:23:16 +01001876 q = skb_get_queue_mapping(skb);
1877 if (txq == sc->tx.txq_map[q]) {
1878 spin_lock_bh(&txq->axq_lock);
1879 if (WARN_ON(--txq->pending_frames < 0))
1880 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001881
Felix Fietkau7545daf2011-01-24 19:23:16 +01001882 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1883 ieee80211_wake_queue(sc->hw, q);
1884 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001885 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001886 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001887 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001888
1889 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301890}
1891
1892static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001893 struct ath_txq *txq, struct list_head *bf_q,
1894 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301895{
1896 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301897 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301898 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301899
Sujithe8324352009-01-16 21:38:42 +05301900 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301901 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301902
1903 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301904 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301905
1906 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301907 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301908 }
1909
Ben Greearc1739eb32010-10-14 12:45:29 -07001910 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001911 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001912
1913 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301914 if (time_after(jiffies,
1915 bf->bf_state.bfs_paprd_timestamp +
1916 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001917 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001918 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001919 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001920 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001921 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001922 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001923 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001924 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001925 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
1926 * accidentally reference it later.
1927 */
1928 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301929
1930 /*
1931 * Return the list of ath_buf of this mpdu to free queue
1932 */
1933 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1934 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1935 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1936}
1937
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001938static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1939 struct ath_tx_status *ts, int nframes, int nbad,
1940 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301941{
Sujitha22be222009-03-30 15:28:36 +05301942 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301943 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301944 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001945 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001946 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301947 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301948
Sujith95e4acb2009-03-13 08:56:09 +05301949 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001950 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301951
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001952 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301953 WARN_ON(tx_rateindex >= hw->max_rates);
1954
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001955 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301956 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001957 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001958 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301959
Felix Fietkaub572d032010-11-14 15:20:07 +01001960 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001961
Felix Fietkaub572d032010-11-14 15:20:07 +01001962 tx_info->status.ampdu_len = nframes;
1963 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001964 }
1965
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001966 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301967 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001968 /*
1969 * If an underrun error is seen, treat it as an excessive
1970 * retry only if the max frame trigger level has been reached
1971 * (2 KB for single stream, and 4 KB for dual stream).
1972 * Adjust the long retry as if the frame was tried
1973 * hw->max_rate_tries times to affect how rate control updates
1974 * PER for the failed rate.
1975 * In case of congestion on the bus penalizing this type of
1976 * underruns should help hardware actually transmit new frames
1977 * successfully by eventually preferring slower rates.
1978 * This itself should also alleviate congestion on the bus.
1979 */
1980 if (ieee80211_is_data(hdr->frame_control) &&
1981 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1982 ATH9K_TX_DELIM_UNDERRUN)) &&
1983 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1984 tx_info->status.rates[tx_rateindex].count =
1985 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301986 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301987
Felix Fietkau545750d2009-11-23 22:21:01 +01001988 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301989 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001990 tx_info->status.rates[i].idx = -1;
1991 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301992
Felix Fietkau78c46532010-06-25 01:26:16 +02001993 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301994}
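/*
 * The nframes/nbad counts above feed mac80211 rate control (e.g. minstrel_ht)
 * through status.ampdu_len and status.ampdu_ack_len, and are only filled in
 * for frames flagged as A-MPDU with update_rc set; for a plain frame the
 * callers pass nframes = 1 and nbad = txok ? 0 : 1.
 */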
1995
Sujithc4288392008-11-18 09:09:30 +05301996static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001997{
Sujithcbe61d82009-02-09 13:27:12 +05301998 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001999 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2001 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302002 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002003 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302004 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002005 int status;
2006
Joe Perches226afe62010-12-02 19:12:37 -08002007 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2008 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2009 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002010
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002011 for (;;) {
2012 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002013 if (list_empty(&txq->axq_q)) {
2014 txq->axq_link = NULL;
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002015 if (sc->sc_flags & SC_OP_TXAGGR &&
2016 !txq->txq_flush_inprogress)
Ben Greear082f6532011-01-09 23:11:47 -08002017 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002018 spin_unlock_bh(&txq->axq_lock);
2019 break;
2020 }
2021 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2022
2023 /*
2024 * There is a race condition that a BH gets scheduled
2025 * after sw writes TxE and before hw re-loads the last
2026 * descriptor to get the newly chained one.
2027 * Software must keep the last DONE descriptor as a
2028 * holding descriptor - software does so by marking
2029 * it with the STALE flag.
2030 */
2031 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302032 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002033 bf_held = bf;
2034 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302035 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002036 break;
2037 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002038 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302039 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040 }
2041 }
2042
2043 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302044 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002045
Felix Fietkau29bffa92010-03-29 20:14:23 -07002046 memset(&ts, 0, sizeof(ts));
2047 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048 if (status == -EINPROGRESS) {
2049 spin_unlock_bh(&txq->axq_lock);
2050 break;
2051 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002052 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053
2054 /*
2055 * Remove ath_buf's of the same transmit unit from txq,
2056 * but leave the last descriptor behind as the holding
2057 * descriptor for hw.
2058 */
Sujitha119cc42009-03-30 15:28:38 +05302059 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002060 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002061 if (!list_is_singular(&lastbf->list))
2062 list_cut_position(&bf_head,
2063 &txq->axq_q, lastbf->list.prev);
2064
2065 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002066 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002067 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002068 if (bf_held)
2069 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002070
2071 if (bf_is_ampdu_not_probing(bf))
2072 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002073
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 spin_unlock_bh(&txq->axq_lock);
2075
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002076 if (bf_held)
2077 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078
Sujithcd3d39a2008-08-11 14:03:34 +05302079 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080 /*
2081 * This frame is sent out as a single frame.
2082 * Use hardware retry status for this frame.
2083 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002084 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302085 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002086 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087 }
Johannes Berge6a98542008-10-21 12:40:02 +02002088
Sujithcd3d39a2008-08-11 14:03:34 +05302089 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002090 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2091 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002093 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002096
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002097 if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002098 ath_txq_schedule(sc, txq);
2099 spin_unlock_bh(&txq->axq_lock);
2100 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002101}
2102
Vivek Natarajan181fb182011-01-27 14:45:08 +05302103static void ath_hw_pll_work(struct work_struct *work)
2104{
2105 struct ath_softc *sc = container_of(work, struct ath_softc,
2106 hw_pll_work.work);
2107 static int count;
2108
2109 if (AR_SREV_9485(sc->sc_ah)) {
2110 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2111 count++;
2112
2113 if (count == 3) {
2114 /* Rx is hung for more than 500ms. Reset it */
2115 ath_reset(sc, true);
2116 count = 0;
2117 }
2118 } else
2119 count = 0;
2120
2121 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2122 }
2123}
2124
Sujith305fe472009-07-23 15:32:29 +05302125static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002126{
2127 struct ath_softc *sc = container_of(work, struct ath_softc,
2128 tx_complete_work.work);
2129 struct ath_txq *txq;
2130 int i;
2131 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002132#ifdef CONFIG_ATH9K_DEBUGFS
2133 sc->tx_complete_poll_work_seen++;
2134#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002135
2136 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2137 if (ATH_TXQ_SETUP(sc, i)) {
2138 txq = &sc->tx.txq[i];
2139 spin_lock_bh(&txq->axq_lock);
2140 if (txq->axq_depth) {
2141 if (txq->axq_tx_inprogress) {
2142 needreset = true;
2143 spin_unlock_bh(&txq->axq_lock);
2144 break;
2145 } else {
2146 txq->axq_tx_inprogress = true;
2147 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002148 } else {
2149 /* If the queue has pending buffers, then it
2150 * should be doing tx work (and have axq_depth).
2151 * Shouldn't get to this state I think..but
2152 * we do.
2153 */
2154 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2155 (txq->pending_frames > 0 ||
2156 !list_empty(&txq->axq_acq) ||
2157 txq->stopped)) {
2158 ath_err(ath9k_hw_common(sc->sc_ah),
2159 "txq: %p axq_qnum: %u,"
2160 " mac80211_qnum: %i"
2161 " axq_link: %p"
2162 " pending frames: %i"
2163 " axq_acq empty: %i"
2164 " stopped: %i"
2165 " axq_depth: 0 Attempting to"
2166 " restart tx logic.\n",
2167 txq, txq->axq_qnum,
2168 txq->mac80211_qnum,
2169 txq->axq_link,
2170 txq->pending_frames,
2171 list_empty(&txq->axq_acq),
2172 txq->stopped);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002173 ath_txq_schedule(sc, txq);
2174 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002175 }
2176 spin_unlock_bh(&txq->axq_lock);
2177 }
2178
2179 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002180 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2181 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002182 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002183 }
2184
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002185 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002186 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2187}
2188
2189
Sujithe8324352009-01-16 21:38:42 +05302190
2191void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192{
Sujithe8324352009-01-16 21:38:42 +05302193 int i;
2194 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195
Sujithe8324352009-01-16 21:38:42 +05302196 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002197
2198 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302199 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2200 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002201 }
2202}
2203
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002204void ath_tx_edma_tasklet(struct ath_softc *sc)
2205{
2206 struct ath_tx_status txs;
2207 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2208 struct ath_hw *ah = sc->sc_ah;
2209 struct ath_txq *txq;
2210 struct ath_buf *bf, *lastbf;
2211 struct list_head bf_head;
2212 int status;
2213 int txok;
2214
2215 for (;;) {
2216 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2217 if (status == -EINPROGRESS)
2218 break;
2219 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002220 ath_dbg(common, ATH_DBG_XMIT,
2221 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002222 break;
2223 }
2224
2225 /* Skip beacon completions */
2226 if (txs.qid == sc->beacon.beaconq)
2227 continue;
2228
2229 txq = &sc->tx.txq[txs.qid];
2230
2231 spin_lock_bh(&txq->axq_lock);
2232 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2233 spin_unlock_bh(&txq->axq_lock);
2234 return;
2235 }
2236
2237 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2238 struct ath_buf, list);
2239 lastbf = bf->bf_lastbf;
2240
2241 INIT_LIST_HEAD(&bf_head);
2242 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2243 &lastbf->list);
2244 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2245 txq->axq_depth--;
2246 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002247 if (bf_is_ampdu_not_probing(bf))
2248 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249 spin_unlock_bh(&txq->axq_lock);
2250
2251 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2252
2253 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2255 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002256 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002257 }
2258
2259 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002260 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2261 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262 else
2263 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2264 &txs, txok, 0);
2265
2266 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002267
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002268 if (!txq->txq_flush_inprogress) {
2269 if (!list_empty(&txq->txq_fifo_pending)) {
2270 INIT_LIST_HEAD(&bf_head);
2271 bf = list_first_entry(&txq->txq_fifo_pending,
2272 struct ath_buf, list);
2273 list_cut_position(&bf_head,
2274 &txq->txq_fifo_pending,
2275 &bf->bf_lastbf->list);
2276 ath_tx_txqaddbuf(sc, txq, &bf_head);
2277 } else if (sc->sc_flags & SC_OP_TXAGGR)
2278 ath_txq_schedule(sc, txq);
2279 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002280 spin_unlock_bh(&txq->axq_lock);
2281 }
2282}
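/*
 * Unlike ath_tx_processq(), this EDMA completion path pulls status entries
 * from the dedicated TX status ring (ath9k_hw_txprocdesc() with a NULL
 * descriptor), maps each entry to its queue via txs.qid, and refills the
 * TX FIFO from txq_fifo_pending once a slot becomes free.
 */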
2283
Sujithe8324352009-01-16 21:38:42 +05302284/*****************/
2285/* Init, Cleanup */
2286/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002287
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002288static int ath_txstatus_setup(struct ath_softc *sc, int size)
2289{
2290 struct ath_descdma *dd = &sc->txsdma;
2291 u8 txs_len = sc->sc_ah->caps.txs_len;
2292
2293 dd->dd_desc_len = size * txs_len;
2294 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2295 &dd->dd_desc_paddr, GFP_KERNEL);
2296 if (!dd->dd_desc)
2297 return -ENOMEM;
2298
2299 return 0;
2300}
2301
2302static int ath_tx_edma_init(struct ath_softc *sc)
2303{
2304 int err;
2305
2306 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2307 if (!err)
2308 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2309 sc->txsdma.dd_desc_paddr,
2310 ATH_TXSTATUS_RING_SIZE);
2311
2312 return err;
2313}
2314
2315static void ath_tx_edma_cleanup(struct ath_softc *sc)
2316{
2317 struct ath_descdma *dd = &sc->txsdma;
2318
2319 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2320 dd->dd_desc_paddr);
2321}
2322
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002323int ath_tx_init(struct ath_softc *sc, int nbufs)
2324{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002325 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326 int error = 0;
2327
Sujith797fe5cb2009-03-30 15:28:45 +05302328 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002329
Sujith797fe5cb2009-03-30 15:28:45 +05302330 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002331 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302332 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002333 ath_err(common,
2334 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302335 goto err;
2336 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337
Sujith797fe5cb2009-03-30 15:28:45 +05302338 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002339 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302340 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002341 ath_err(common,
2342 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302343 goto err;
2344 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002346 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302347 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002348
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002349 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2350 error = ath_tx_edma_init(sc);
2351 if (error)
2352 goto err;
2353 }
2354
Sujith797fe5cb2009-03-30 15:28:45 +05302355err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356 if (error != 0)
2357 ath_tx_cleanup(sc);
2358
2359 return error;
2360}
2361
Sujith797fe5cb2009-03-30 15:28:45 +05302362void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363{
Sujithb77f4832008-12-07 21:44:03 +05302364 if (sc->beacon.bdma.dd_desc_len != 0)
2365 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujithb77f4832008-12-07 21:44:03 +05302367 if (sc->tx.txdma.dd_desc_len != 0)
2368 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002369
2370 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2371 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372}
2373
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2375{
Sujithc5170162008-10-29 10:13:59 +05302376 struct ath_atx_tid *tid;
2377 struct ath_atx_ac *ac;
2378 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujith8ee5afb2008-12-07 21:43:36 +05302380 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302381 tidno < WME_NUM_TID;
2382 tidno++, tid++) {
2383 tid->an = an;
2384 tid->tidno = tidno;
2385 tid->seq_start = tid->seq_next = 0;
2386 tid->baw_size = WME_MAX_BA;
2387 tid->baw_head = tid->baw_tail = 0;
2388 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302389 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302390 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302391 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302392 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302393 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302394 tid->state &= ~AGGR_ADDBA_COMPLETE;
2395 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302396 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397
Sujith8ee5afb2008-12-07 21:43:36 +05302398 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302399 acno < WME_NUM_AC; acno++, ac++) {
2400 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002401 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302402 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403 }
2404}
2405
Sujithb5aa9bf2008-10-29 10:13:31 +05302406void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407{
Felix Fietkau2b409942010-07-07 19:42:08 +02002408 struct ath_atx_ac *ac;
2409 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002411 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302412
Felix Fietkau2b409942010-07-07 19:42:08 +02002413 for (tidno = 0, tid = &an->tid[tidno];
2414 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415
Felix Fietkau2b409942010-07-07 19:42:08 +02002416 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002417 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418
Felix Fietkau2b409942010-07-07 19:42:08 +02002419 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 if (tid->sched) {
2422 list_del(&tid->list);
2423 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002425
2426 if (ac->sched) {
2427 list_del(&ac->list);
2428 tid->ac->sched = false;
2429 }
2430
2431 ath_tid_drain(sc, txq, tid);
2432 tid->state &= ~AGGR_ADDBA_COMPLETE;
2433 tid->state &= ~AGGR_CLEANUP;
2434
2435 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002436 }
2437}